tonegas / nnodely / build 12162489906

04 Dec 2024 03:04PM UTC coverage: 93.934% (-0.2%) from 94.155%
Push event on github (web-flow): Merge pull request #6 from tonegas/release/0.15.0 (Release/0.15.0)

37 of 42 new or added lines in 8 files covered (88.1%)
59 existing lines in 7 files now uncovered
8625 of 9182 relevant lines covered (93.93%)
0.94 hits per line
Source file: /nnodely/nnodely.py (89.52% of lines covered)

# External packages
import random, torch, copy, os
import numpy as np
import pandas as pd

# nnodely packages
from nnodely.visualizer import TextVisualizer, Visualizer
from nnodely.loss import CustomLoss
from nnodely.model import Model
from nnodely.optimizer import Optimizer, SGD, Adam
from nnodely.exporter import Exporter, StandardExporter
from nnodely.modeldef import ModelDef

from nnodely.utils import check, argmax_max, argmin_min, tensor_to_list

from nnodely.logger import logging, nnLogger
log = nnLogger(__name__, logging.INFO)


class Modely:
    """
    Creates the main nnodely object, used to define the network, train it, and export it.

    :param seed: the seed used for the random number generators
    :type seed: int or None

    Example:
        >>> model = Modely()
    """
    def __init__(self,
                 visualizer:str|Visualizer|None = 'Standard',
                 exporter:str|Exporter|None = 'Standard',
                 seed:int|None = None,
                 workspace:str|None = None,
                 log_internal:bool = False,
                 save_history:bool = False):

        # Visualizer
        if visualizer == 'Standard':
            self.visualizer = TextVisualizer(1)
        elif visualizer is not None:
            self.visualizer = visualizer
        else:
            self.visualizer = Visualizer()
        self.visualizer.set_n4m(self)

        # Exporter
        if exporter == 'Standard':
            self.exporter = StandardExporter(workspace, self.visualizer, save_history)
        elif exporter is not None:
            self.exporter = exporter
        else:
            self.exporter = Exporter()

        ## Set the random seed for reproducibility
        if seed is not None:
            self.resetSeed(seed)

        # Save internal
        self.log_internal = log_internal
        if self.log_internal:
            self.internals = {}

        # Models definition
        self.model_def = ModelDef()
        self.input_n_samples = {}
        self.max_n_samples = 0
        self.neuralized = False
        self.traced = False
        self.model = None

        # Dataset Parameters
        self.data_loaded = False
        self.file_count = 0
        self.num_of_samples = {}
        self.data = {}
        self.n_datasets = 0
        self.datasets_loaded = set()

        # Training Parameters
        self.standard_train_parameters = {
            'models' : None,
            'train_dataset' : None, 'validation_dataset' : None, 'test_dataset' : None, 'splits' : [70, 20, 10],
            'closed_loop' : {}, 'connect' : {}, 'step' : 1, 'prediction_samples' : 0,
            'shuffle_data' : True,
            'early_stopping' : None, 'early_stopping_params' : {},
            'select_model' : 'last', 'select_model_params' : {},
            'minimize_gain' : {},
            'num_of_epochs': 100,
            'train_batch_size' : 128, 'val_batch_size' : None, 'test_batch_size' : None,
            'optimizer' : 'Adam',
            'lr' : 0.001, 'lr_param' : {},
            'optimizer_params' : [], 'add_optimizer_params' : [],
            'optimizer_defaults' : {}, 'add_optimizer_defaults' : {}
        }

        # Optimizer
        self.optimizer = None

        # Training Losses
        self.loss_functions = {}

        # Validation Parameters
        self.training = {}
        self.performance = {}
        self.prediction = {}
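
    # Illustrative sketch (not from the source): the constructor arguments above can be
    # combined to run quietly and reproducibly. 'visualizer', 'exporter', 'seed',
    # 'log_internal' and 'save_history' are real parameters of __init__; the values
    # below are only examples.
    #   >>> quiet_model = Modely(visualizer=None, exporter=None, seed=42)
    #   >>> logged_model = Modely(log_internal=True, save_history=True)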

    def resetSeed(self, seed):
        """
        Resets the random seed for reproducibility.

        This method sets the seed of the random number generators used in the project to ensure reproducibility of the results.

        :param seed: the seed value to be used for the random number generators
        :type seed: int

        Example:
            >>> model = Modely()
            >>> model.resetSeed(42)
        """
        torch.manual_seed(seed)  ## set the pytorch seed
        torch.cuda.manual_seed_all(seed)  ## set the pytorch CUDA seed
        random.seed(seed)  ## set the random module seed
        np.random.seed(seed)  ## set the numpy seed

    def __call__(self, inputs = {}, sampled = False, closed_loop = {}, connect = {}, prediction_samples = 'auto', num_of_samples = 'auto'):
        ## Deep-copy the dictionaries to avoid modifying the caller's arguments
        inputs = copy.deepcopy(inputs)
        closed_loop = copy.deepcopy(closed_loop)
        connect = copy.deepcopy(connect)

        ## Check neuralization
        check(self.neuralized, RuntimeError, "The network is not neuralized.")

        ## Build the list of inputs
        model_inputs = list(self.model_def['Inputs'].keys())
        model_states = list(self.model_def['States'].keys())
        provided_inputs = list(inputs.keys())
        missing_inputs = list(set(model_inputs) - set(provided_inputs))
        extra_inputs = list(set(provided_inputs) - set(model_inputs) - set(model_states))
        if not set(provided_inputs).issubset(set(model_inputs) | set(model_states)):
            ## Ignore the extra inputs
            log.warning(f'The complete model inputs are {model_inputs}, the provided inputs are {provided_inputs}. Ignoring {extra_inputs}...')
            for key in extra_inputs:
                del inputs[key]
            provided_inputs = list(inputs.keys())
        non_recurrent_inputs = list(set(provided_inputs) - set(closed_loop.keys()) - set(connect.keys()) - set(model_states))
        recurrent_inputs = set(closed_loop.keys()) | set(connect.keys()) | set(model_states)

        ## Define the input windows and check the closed-loop and connect variables
        input_windows = {}
        for in_var, out_var in (closed_loop.items() | connect.items()):
            check(in_var in self.model_def['Inputs'], ValueError, f'the tag {in_var} is not an input variable.')
            check(out_var in self.model_def['Outputs'], ValueError, f'the tag {out_var} is not an output of the network')
            if in_var in inputs.keys():
                input_windows[in_var] = len(inputs[in_var]) if sampled else len(inputs[in_var]) - self.input_n_samples[in_var] + 1
            else:
                input_windows[in_var] = 1
        for key in model_states:
            if key in inputs.keys():
                input_windows[key] = len(inputs[key]) if sampled else len(inputs[key]) - self.input_n_samples[key] + 1
            else:
                input_windows[key] = 1

        ## Determine the maximal number of samples that can be created
        if non_recurrent_inputs:
            if sampled:
                min_dim_ind, min_dim = argmin_min([len(inputs[key]) for key in non_recurrent_inputs])
                max_dim_ind, max_dim = argmax_max([len(inputs[key]) for key in non_recurrent_inputs])
            else:
                min_dim_ind, min_dim = argmin_min([len(inputs[key])-self.input_n_samples[key]+1 for key in non_recurrent_inputs])
                max_dim_ind, max_dim = argmax_max([len(inputs[key])-self.input_n_samples[key]+1 for key in non_recurrent_inputs])
            min_dim_key = non_recurrent_inputs[min_dim_ind]
            max_dim_key = non_recurrent_inputs[max_dim_ind]
        else:
            if recurrent_inputs:
                if provided_inputs:
                    min_dim_ind, min_dim = argmin_min([input_windows[key] for key in provided_inputs])
                    max_dim_ind, max_dim = argmax_max([input_windows[key] for key in provided_inputs])
                    min_dim_key = provided_inputs[min_dim_ind]
                    max_dim_key = provided_inputs[max_dim_ind]
                else:
                    min_dim = max_dim = 1
            else:
                min_dim = max_dim = 0

        ## Define the number of samples
        if num_of_samples != 'auto':
            window_dim = min_dim = max_dim = num_of_samples
        else:
            # Use the minimum number of input samples if the network is not autonomous, otherwise the minimum number of state samples
            window_dim = min_dim
        check(window_dim > 0, StopIteration, f'Missing at least {abs(min_dim)+1} samples in the input window')

        ## Autofill the missing inputs
        if missing_inputs:
            log.warning(f'Inputs not provided: {missing_inputs}. Autofilling with zeros...')
            for key in missing_inputs:
                inputs[key] = np.zeros(
                    shape=(self.input_n_samples[key] + window_dim - 1, self.model_def['Inputs'][key]['dim']),
                    dtype=np.float32).tolist()

        n_samples_input = {}
        for key in inputs.keys():
            if key in missing_inputs:
                n_samples_input[key] = 1
            else:
                n_samples_input[key] = len(inputs[key]) if sampled else len(inputs[key]) - self.input_n_samples[key] + 1

        # Input vector: pad up to the requested number of samples
        if num_of_samples != 'auto':
            for key in inputs.keys():
                if key in model_inputs:
                    input_dim = self.model_def['Inputs'][key]['dim']
                elif key in model_states:
                    input_dim = self.model_def['States'][key]['dim']
                if input_dim > 1:
                    inputs[key] += [[0 for val in range(input_dim)] for val in
                                    range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]
                else:
                    inputs[key] += [0 for val in range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]

        ## Warn the user about different time windows between samples
        if min_dim != max_dim:
            log.warning(f'Different number of samples between inputs [MAX {max_dim_key} = {max_dim}; MIN {min_dim_key} = {min_dim}]')

        result_dict = {} ## initialize the resulting dictionary
        for key in self.model_def['Outputs'].keys():
            result_dict[key] = []

        ## Initialize the state variables
        if prediction_samples is None:
            # If prediction_samples is None the connections are removed
            self.model.init_states({}, connect = connect)
        else:
            self.model.init_states(self.model_def['States'], connect = connect, reset_states = False)

        ## Cycle through all the provided samples
        with torch.inference_mode():
            X = {}
            for i in range(window_dim):
                for key, val in inputs.items():
                    # If prediction_samples is None, always take the input.
                    # If prediction_samples is 'auto', take the input while samples are still available.
                    # If prediction_samples is an integer, take the input every prediction_samples steps.
                    # Otherwise, keep the same input if the key is a closed-loop variable,
                    # and remove it from X if the key is a state or connect input.
                    if not (prediction_samples is None \
                        or ((prediction_samples is not None and prediction_samples != 'auto') and i % (prediction_samples + 1) == 0) \
                        or (prediction_samples == 'auto' and i < n_samples_input[key])):
                        if key in (closed_loop|connect).keys() or key in model_states:
                            if (key in model_states or key in connect.keys()) and key in X.keys():
                                del X[key]
                            continue
                    X[key] = torch.from_numpy(np.array(val[i])).to(torch.float32) if sampled else torch.from_numpy(
                            np.array(val[i:i + self.input_n_samples[key]])).to(torch.float32)

                    if key in model_inputs:
                        input_dim = self.model_def['Inputs'][key]['dim']
                    elif key in model_states:
                        input_dim = self.model_def['States'][key]['dim']

                    if input_dim > 1:
                        check(len(X[key].shape) == 2, ValueError,
                                f'The input {key} must have two dimensions')
                        check(X[key].shape[1] == input_dim, ValueError,
                                f'The second dimension of the input "{key}" must be equal to {input_dim}')

                    if input_dim == 1 and X[key].shape[-1] != 1: ## add the input dimension
                        X[key] = X[key].unsqueeze(-1)
                    if X[key].ndim <= 1: ## add the batch dimension
                        X[key] = X[key].unsqueeze(0)
                    if X[key].ndim <= 2: ## add the time dimension
                        X[key] = X[key].unsqueeze(0)

                ## Reset the state variables
                if prediction_samples is None:
                    ## If prediction_samples is None the states are reset at every step
                    self.model.reset_states(X, only=False)
                    self.model.reset_connect_variables(connect, X, only=False)
                elif prediction_samples == 'auto':
                    ## If prediction_samples is 'auto' the states are reset with the available samples
                    self.model.reset_states(X)
                    self.model.reset_connect_variables(connect, X)
                else:
                    ## Otherwise the states are reset every prediction_samples steps
                    if i % (prediction_samples+1) == 0:
                        self.model.reset_states(X, only=False)
                        self.model.reset_connect_variables(connect, X, only=False)

                result, _ = self.model(X)

                ## Update the recurrent variables
                for close_in, out_var in closed_loop.items():
                    shift = result[out_var].shape[1]  ## take the output time dimension
                    X[close_in] = torch.roll(X[close_in], shifts=-1, dims=1) ## roll the time window
                    X[close_in][:, -shift:, :] = result[out_var] ## substitute with the predicted value

                ## Append the prediction of the current sample to the result dictionary
                for key in self.model_def['Outputs'].keys():
                    if result[key].shape[-1] == 1:
                        result[key] = result[key].squeeze(-1)
                        if result[key].shape[-1] == 1:
                            result[key] = result[key].squeeze(-1)
                    result_dict[key].append(result[key].detach().squeeze(dim=0).tolist())

        return result_dict
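
    # Illustrative sketch (not from the source): assuming a neuralized model with an
    # input named 'x' and an output named 'out' (hypothetical names), one-shot inference
    # and a closed-loop simulation could look like:
    #   >>> results = model({'x': [1.0, 2.0, 3.0, 4.0, 5.0]})
    #   >>> results['out']
    #   >>> sim = model({'x': [1.0]}, closed_loop={'x': 'out'},
    #   ...             prediction_samples=10, num_of_samples=10)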

    def getSamples(self, dataset, index = None, window = 1):
        check(self.data_loaded, ValueError, 'The dataset must first be loaded using the <loadData> function!')
        if index is None:
            index = random.randint(0, self.num_of_samples[dataset] - window)
        if self.data_loaded:
            result_dict = {}
            for key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
                result_dict[key] = []
            for idx in range(window):
                for key, samples in self.data[dataset].items():
                    if key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
                        result_dict[key].append(samples[index+idx])
            return result_dict
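
    # Illustrative sketch (not from the source): 'dataset_name' is a placeholder for a
    # dataset previously loaded with loadData; a window of 5 consecutive samples is drawn
    # starting at index 0.
    #   >>> sample = model.getSamples('dataset_name', index=0, window=5)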

    def addConnect(self, stream_out, state_list_in):
        self.model_def.addConnect(stream_out, state_list_in)

    def addClosedLoop(self, stream_out, state_list_in):
        self.model_def.addClosedLoop(stream_out, state_list_in)

    def addModel(self, name, stream_list):
        self.model_def.addModel(name, stream_list)

    def removeModel(self, name_list):
        self.model_def.removeModel(name_list)

    def addMinimize(self, name, streamA, streamB, loss_function='mse'):
        self.model_def.addMinimize(name, streamA, streamB, loss_function)
        self.visualizer.showaddMinimize(name)
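
    # Illustrative sketch (not from the source): 'out' and 'target' stand for a stream
    # and a target defined elsewhere with the nnodely operators (hypothetical names);
    # 'mse' is the default loss_function of addMinimize.
    #   >>> model.addModel('net', [out])
    #   >>> model.addMinimize('error', out, target, loss_function='mse')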

    def removeMinimize(self, name_list):
        self.model_def.removeMinimize(name_list)

    def neuralizeModel(self, sample_time = None, clear_model = False, model_def = None):
        if model_def is not None:
            check(sample_time is None, ValueError, 'The sample_time must be None if a model_def is provided')
            check(clear_model is False, ValueError, 'The clear_model must be False if a model_def is provided')
            self.model_def = ModelDef(model_def)
        else:
            if clear_model:
                self.model_def.update()
            else:
                self.model_def.updateParameters(self.model)

        self.model_def.setBuildWindow(sample_time)
        self.model = Model(self.model_def.json)

        input_ns_backward = {key:value['ns'][0] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
        input_ns_forward = {key:value['ns'][1] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
        self.input_n_samples = {}
        for key, value in (self.model_def['Inputs'] | self.model_def['States']).items():
            self.input_n_samples[key] = input_ns_backward[key] + input_ns_forward[key]
        self.max_n_samples = max(input_ns_backward.values()) + max(input_ns_forward.values())

        self.neuralized = True
        self.traced = False
        self.visualizer.showModel(self.model_def.json)
        self.visualizer.showModelInputWindow()
        self.visualizer.showBuiltModel()
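
    # Illustrative sketch (not from the source): once the model structure is defined,
    # a sample time (e.g. 0.1 seconds, value chosen only for the example) discretizes
    # the input windows and builds the torch model.
    #   >>> model.neuralizeModel(sample_time=0.1)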

    def loadData(self, name, source, format=None, skiplines=0, delimiter=',', header=None):
        check(self.neuralized, ValueError, "The network is not neuralized.")
        check(delimiter in ['\t', '\n', ';', ',', ' '], ValueError, 'delimiter not valid!')

        json_inputs = self.model_def['Inputs'] | self.model_def['States']
        model_inputs = list(json_inputs.keys())
        ## Initialize the dictionary containing the data
        if name in list(self.data.keys()):
            log.warning(f'Dataset named {name} already loaded! Overriding the existing one...')
        self.data[name] = {}

        input_ns_backward = {key:value['ns'][0] for key, value in json_inputs.items()}
        input_ns_forward = {key:value['ns'][1] for key, value in json_inputs.items()}
        max_samples_backward = max(input_ns_backward.values())
        max_samples_forward = max(input_ns_forward.values())
        max_n_samples = max_samples_backward + max_samples_forward

        num_of_samples = {}
        if type(source) is str: ## we have a directory path containing the files
            ## Collect the column indexes
            format_idx = {}
            idx = 0
            for item in format:
                if isinstance(item, tuple):
                    for key in item:
                        if key not in model_inputs:
                            idx += 1
                            break
                        n_cols = json_inputs[key]['dim']
                        format_idx[key] = (idx, idx+n_cols)
                    idx += n_cols
                else:
                    if item not in model_inputs:
                        idx += 1
                        continue
                    n_cols = json_inputs[item]['dim']
                    format_idx[item] = (idx, idx+n_cols)
                    idx += n_cols

            ## Initialize each input key
            for key in format_idx.keys():
                self.data[name][key] = []

            ## Obtain the file names
            try:
                _, _, files = next(os.walk(source))
                files.sort()
            except StopIteration:
                check(False, StopIteration, f'ERROR: The path "{source}" does not exist!')
                return
            self.file_count = len(files)

            ## Cycle through all the files
            for file in files:
                try:
                    ## Read the csv
                    df = pd.read_csv(os.path.join(source,file), skiprows=skiplines, delimiter=delimiter, header=header)
                except Exception:
                    log.warning(f'Cannot read file {os.path.join(source,file)}')
                    continue
                ## Cycle through all the windows
                for key, idxs in format_idx.items():
                    back, forw = input_ns_backward[key], input_ns_forward[key]
                    ## Save the data as a numpy array
                    data = df.iloc[:, idxs[0]:idxs[1]].to_numpy()
                    self.data[name][key] += [data[i-back:i+forw] for i in range(max_samples_backward, len(df)-max_samples_forward+1)]

            ## Stack the files
            for key in format_idx.keys():
                self.data[name][key] = np.stack(self.data[name][key])
                num_of_samples[key] = self.data[name][key].shape[0]

        elif type(source) is dict:  ## we have a crafted dataset
            self.file_count = 1

            ## Build the sample windows for each input provided in the dictionary
            for key in model_inputs:
                if key not in source.keys():
                    continue

                self.data[name][key] = []  ## Initialize the dataset

                back, forw = input_ns_backward[key], input_ns_forward[key]
                for idx in range(len(source[key]) - max_n_samples+1):
                    self.data[name][key].append(source[key][idx + (max_samples_backward - back):idx + (max_samples_backward + forw)])

            ## Stack the samples
            for key in model_inputs:
                if key not in source.keys():
                    continue
                self.data[name][key] = np.stack(self.data[name][key])
                if self.data[name][key].ndim == 2: ## Add the sample dimension
                    self.data[name][key] = np.expand_dims(self.data[name][key], axis=-1)
                if self.data[name][key].ndim > 3:
                    self.data[name][key] = np.squeeze(self.data[name][key], axis=1)
                num_of_samples[key] = self.data[name][key].shape[0]

        ## Check the dimension of the samples
        check(len(set(num_of_samples.values())) == 1, ValueError,
              f"The number of samples of the dataset {name} is not the same for all the inputs: {num_of_samples}")
        self.num_of_samples[name] = num_of_samples[list(num_of_samples.keys())[0]]

        ## Set the loaded flag to True
        self.data_loaded = True
        ## Update the number of datasets loaded
        self.n_datasets = len(self.data.keys())
        self.datasets_loaded.add(name)
        ## Show the dataset
        self.visualizer.showDataset(name=name)
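
    # Illustrative sketch (not from the source): 'data/' and the column names are
    # placeholders. With a directory source, format lists the csv columns in order
    # (names that are not model inputs, like 'time' here, are skipped); with a dict
    # source, the values are the raw input sequences.
    #   >>> model.loadData(name='train_set', source='data/', format=['time', 'x', 'y'])
    #   >>> model.loadData(name='crafted', source={'x': [0, 1, 2, 3], 'y': [0, 1, 4, 9]})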

    def filterData(self, filter_function, dataset_name = None):
        if dataset_name is None:
            for name in self.data.keys():
                idx_to_remove = []
                dataset = self.data[name]
                n_samples = len(dataset[list(dataset.keys())[0]])

                data_for_filter = []
                for i in range(n_samples):
                    new_sample = {key: val[i] for key, val in dataset.items()}
                    data_for_filter.append(new_sample)

                for idx, sample in enumerate(data_for_filter):
                    if not filter_function(sample):
                        idx_to_remove.append(idx)

                for key in self.data[name].keys():
                    self.data[name][key] = np.delete(self.data[name][key], idx_to_remove, axis=0)
                    self.num_of_samples[name] = self.data[name][key].shape[0]
                self.visualizer.showDataset(name=name)

        else:
            idx_to_remove = []
            dataset = self.data[dataset_name]
            n_samples = len(dataset[list(dataset.keys())[0]])

            data_for_filter = []
            for i in range(n_samples):
                new_sample = {key: val[i] for key, val in dataset.items()}
                data_for_filter.append(new_sample)

            for idx, sample in enumerate(data_for_filter):
                if not filter_function(sample):
                    idx_to_remove.append(idx)

            for key in self.data[dataset_name].keys():
                self.data[dataset_name][key] = np.delete(self.data[dataset_name][key], idx_to_remove, axis=0)
                self.num_of_samples[dataset_name] = self.data[dataset_name][key].shape[0]
            self.visualizer.showDataset(name=dataset_name)
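
    # Illustrative sketch (not from the source): the filter receives one sample (a dict
    # mapping each input name to its window array of shape (window, dim)) and returns
    # False to drop it; 'x' is a placeholder input name.
    #   >>> model.filterData(lambda sample: sample['x'][0, 0] >= 0.0)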

    def resetStates(self, values = None, only = True):
        self.model.init_states(self.model_def['States'], reset_states=False)
        self.model.reset_states(values, only)

    def __save_internal(self, key, value):
        self.internals[key] = tensor_to_list(value)

    def __get_train_parameters(self, training_params):
        run_train_parameters = copy.deepcopy(self.standard_train_parameters)
        if training_params is None:
            return run_train_parameters
        for key, value in training_params.items():
            check(key in run_train_parameters, KeyError, f"The parameter {key} does not exist among the standard training parameters")
            run_train_parameters[key] = value
        return run_train_parameters

    def __get_parameter(self, **parameter):
        assert len(parameter) == 1
        name = list(parameter.keys())[0]
        self.run_training_params[name] = parameter[name] if parameter[name] is not None else self.run_training_params[name]
        return self.run_training_params[name]

    def __get_batch_sizes(self, train_batch_size, val_batch_size, test_batch_size):
        ## Check if the batch_size can be used for the current dataset, otherwise set the batch_size to the maximum value
        self.__get_parameter(train_batch_size = train_batch_size)
        self.__get_parameter(val_batch_size = val_batch_size)
        self.__get_parameter(test_batch_size = test_batch_size)

        if self.run_training_params['recurrent_train']:
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train'] - self.run_training_params['prediction_samples']
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
                self.run_training_params['val_batch_size'] = max(0, self.run_training_params['n_samples_val'] - self.run_training_params['prediction_samples'])
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
                self.run_training_params['test_batch_size'] = max(0, self.run_training_params['n_samples_test'] - self.run_training_params['prediction_samples'])
        else:
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train']
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
                self.run_training_params['val_batch_size'] = self.run_training_params['n_samples_val']
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
                self.run_training_params['test_batch_size'] = self.run_training_params['n_samples_test']

        check(self.run_training_params['train_batch_size'] > 0, ValueError, f'The auto train_batch_size ({self.run_training_params["train_batch_size"]}) = n_samples_train ({self.run_training_params["n_samples_train"]}) - prediction_samples ({self.run_training_params["prediction_samples"]}), must be greater than 0.')

        return self.run_training_params['train_batch_size'], self.run_training_params['val_batch_size'], self.run_training_params['test_batch_size']
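
    # Worked example (illustrative, not from the source): during a recurrent train with
    # n_samples_train = 100 and prediction_samples = 10, a requested train_batch_size of
    # 128 exceeds the dataset, so it is clamped to 100 - 10 = 90, keeping a full
    # prediction horizon available after every batch start.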

    def __initialize_optimizer(self, optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param):
        # Get the optimizer and the initialization parameters
        optimizer = copy.deepcopy(self.__get_parameter(optimizer=optimizer))
        optimizer_params = copy.deepcopy(self.__get_parameter(optimizer_params=optimizer_params))
        optimizer_defaults = copy.deepcopy(self.__get_parameter(optimizer_defaults=optimizer_defaults))
        add_optimizer_params = copy.deepcopy(self.__get_parameter(add_optimizer_params=add_optimizer_params))
        add_optimizer_defaults = copy.deepcopy(self.__get_parameter(add_optimizer_defaults=add_optimizer_defaults))

        ## Get the parameters to be trained
        json_models = []
        models = self.__get_parameter(models=models)
        if 'Models' in self.model_def:
            json_models = list(self.model_def['Models'].keys()) if type(self.model_def['Models']) is dict else [self.model_def['Models']]
        if models is None:
            models = json_models
        self.run_training_params['models'] = models
        params_to_train = set()
        if isinstance(models, str):
            models = [models]
        for model in models:
            check(model in json_models, ValueError, f'The model {model} is not in the model definition')
            if type(self.model_def['Models']) is dict:
                params_to_train |= set(self.model_def['Models'][model]['Parameters'])
            else:
                params_to_train |= set(self.model_def['Parameters'].keys())

        # Get the optimizer
        if type(optimizer) is str:
            if optimizer == 'SGD':
                optimizer = SGD({}, [])
            elif optimizer == 'Adam':
                optimizer = Adam({}, [])
        else:
            check(issubclass(type(optimizer), Optimizer), TypeError,
                  "The optimizer must be an Optimizer or a str")

        optimizer.set_params_to_train(self.model.all_parameters, params_to_train)

        optimizer.add_defaults('lr', self.run_training_params['lr'])
        optimizer.add_option_to_params('lr', self.run_training_params['lr_param'])

        if optimizer_defaults != {}:
            optimizer.set_defaults(optimizer_defaults)
        if optimizer_params != []:
            optimizer.set_params(optimizer_params)

        for key, value in add_optimizer_defaults.items():
            optimizer.add_defaults(key, value)

        add_optimizer_params = optimizer.unfold(add_optimizer_params)
        for param in add_optimizer_params:
            par = param['params']
            del param['params']
            for key, value in param.items():
                optimizer.add_option_to_params(key, {par: value})

        # Override the learning rate if explicitly given
        optimizer.add_defaults('lr', lr)
        optimizer.add_option_to_params('lr', lr_param)

        return optimizer

    def trainModel(self,
                    models=None,
                    train_dataset = None, validation_dataset = None, test_dataset = None, splits = None,
                    closed_loop = None, connect = None, step = None, prediction_samples = None,
                    shuffle_data = None,
                    early_stopping = None, early_stopping_params = None,
                    select_model = None, select_model_params = None,
                    minimize_gain = None,
                    num_of_epochs = None,
                    train_batch_size = None, val_batch_size = None, test_batch_size = None,
                    optimizer = None,
                    lr = None, lr_param = None,
                    optimizer_params = None, optimizer_defaults = None,
                    training_params = None,
                    add_optimizer_params = None, add_optimizer_defaults = None
                   ):

        check(self.data_loaded, RuntimeError, 'There is no data loaded! The training will stop.')
        check(list(self.model.parameters()), RuntimeError, 'There are no modules with learnable parameters! The training will stop.')

        ## Get the running parameters from the dict
        self.run_training_params = copy.deepcopy(self.__get_train_parameters(training_params))

        ## Get the prediction horizon
        prediction_samples = self.__get_parameter(prediction_samples = prediction_samples)
        check(prediction_samples >= 0, KeyError, 'The sample horizon must be non-negative!')

        ## Check closed loop and connect
        step = self.__get_parameter(step = step)
        closed_loop = self.__get_parameter(closed_loop = closed_loop)
        connect = self.__get_parameter(connect = connect)
        recurrent_train = True
        if closed_loop:
            for input, output in closed_loop.items():
                check(input in self.model_def['Inputs'], ValueError, f'the tag {input} is not an input variable.')
                check(output in self.model_def['Outputs'], ValueError, f'the tag {output} is not an output of the network')
                log.warning(f'Recurrent train: closing the loop between the input port {input} and the output port {output} for {prediction_samples} samples')
        elif connect:
            for connect_in, connect_out in connect.items():
                check(connect_in in self.model_def['Inputs'], ValueError, f'the tag {connect_in} is not an input variable.')
                check(connect_out in self.model_def['Outputs'], ValueError, f'the tag {connect_out} is not an output of the network')
                log.warning(f'Recurrent train: connecting the input port {connect_in} with the output port {connect_out} for {prediction_samples} samples')
        elif self.model_def['States']: ## if there are state variables the train must be recurrent
            log.warning(f"Recurrent train: updating the state variables {list(self.model_def['States'].keys())} for {prediction_samples} samples")
        else:
            if prediction_samples != 0:
                log.warning(
                    f"The value of prediction_samples={prediction_samples} is not used in a non-recurrent network.")
            recurrent_train = False
        self.run_training_params['recurrent_train'] = recurrent_train

        ## Get the early stopping
        early_stopping = self.__get_parameter(early_stopping = early_stopping)
        if early_stopping:
            self.run_training_params['early_stopping'] = early_stopping.__name__
        early_stopping_params = self.__get_parameter(early_stopping_params = early_stopping_params)

        ## Get the dataset for the training
        shuffle_data = self.__get_parameter(shuffle_data = shuffle_data)

        ## Get the dataset name
        train_dataset = self.__get_parameter(train_dataset = train_dataset)
        #TODO manage multiple datasets
        if train_dataset is None: ## use all the datasets with the splits
            splits = self.__get_parameter(splits = splits)
            check(len(splits) == 3, ValueError, '3 elements must be inserted for the dataset split in training, validation and test')
            check(sum(splits) == 100, ValueError, 'Training, Validation and Test splits must sum up to 100.')
            check(splits[0] > 0, ValueError, 'The training split cannot be zero.')

            ## Get the dataset name
            dataset = list(self.data.keys())[0] ## take the dataset name

            ## Collect the split sizes
            train_size = splits[0] / 100.0
            val_size = splits[1] / 100.0
            test_size = 1 - (train_size + val_size)
            num_of_samples = self.num_of_samples[dataset]
            n_samples_train = round(num_of_samples*train_size)
            n_samples_val = round(num_of_samples*val_size)
            n_samples_test = round(num_of_samples*test_size)

            ## Split into train, validation and test
            XY_train, XY_val, XY_test = {}, {}, {}
            for key, samples in self.data[dataset].items():
                if val_size == 0.0 and test_size == 0.0: ## only the training set
                    XY_train[key] = torch.from_numpy(samples).to(torch.float32)
                elif val_size == 0.0 and test_size != 0.0: ## only the training and test sets
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(torch.float32)
                    XY_test[key] = torch.from_numpy(samples[n_samples_train:]).to(torch.float32)
                elif val_size != 0.0 and test_size == 0.0: ## only the training and validation sets
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(torch.float32)
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:]).to(torch.float32)
                else: ## training, validation and test sets
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(torch.float32)
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:-n_samples_test]).to(torch.float32)
                    XY_test[key] = torch.from_numpy(samples[n_samples_train+n_samples_val:]).to(torch.float32)

            ## Set the names for resultAnalysis
            train_dataset = self.__get_parameter(train_dataset = f"train_{dataset}_{train_size:0.2f}")
            validation_dataset = self.__get_parameter(validation_dataset = f"validation_{dataset}_{val_size:0.2f}")
            test_dataset = self.__get_parameter(test_dataset = f"test_{dataset}_{test_size:0.2f}")
        else: ## Multi-dataset
            ## Get the names of the datasets
            datasets = list(self.data.keys())
            validation_dataset = self.__get_parameter(validation_dataset=validation_dataset)
            test_dataset = self.__get_parameter(test_dataset=test_dataset)

            ## Collect the number of samples for each dataset
            n_samples_train, n_samples_val, n_samples_test = 0, 0, 0

            check(train_dataset in datasets, KeyError, f'{train_dataset} Not Loaded!')
            if validation_dataset is not None and validation_dataset not in datasets:
                log.warning(f'Validation Dataset [{validation_dataset}] Not Loaded. The training will continue without validation')
            if test_dataset is not None and test_dataset not in datasets:
                log.warning(f'Test Dataset [{test_dataset}] Not Loaded. The training will continue without test')

            ## Split into train, validation and test
            XY_train, XY_val, XY_test = {}, {}, {}
            n_samples_train = self.num_of_samples[train_dataset]
            XY_train = {key: torch.from_numpy(val).to(torch.float32) for key, val in self.data[train_dataset].items()}
            if validation_dataset in datasets:
                n_samples_val = self.num_of_samples[validation_dataset]
                XY_val = {key: torch.from_numpy(val).to(torch.float32) for key, val in self.data[validation_dataset].items()}
            if test_dataset in datasets:
                n_samples_test = self.num_of_samples[test_dataset]
                XY_test = {key: torch.from_numpy(val).to(torch.float32) for key, val in self.data[test_dataset].items()}

        for key in XY_train.keys():
            assert n_samples_train == XY_train[key].shape[0], f'The number of train samples {n_samples_train}!={XY_train[key].shape[0]} is not compliant.'
            if key in XY_val:
                assert n_samples_val == XY_val[key].shape[0], f'The number of val samples {n_samples_val}!={XY_val[key].shape[0]} is not compliant.'
            if key in XY_test:
                assert n_samples_test == XY_test[key].shape[0], f'The number of test samples {n_samples_test}!={XY_test[key].shape[0]} is not compliant.'

        assert n_samples_train > 0, f'There are {n_samples_train} samples for the training.'
        self.run_training_params['n_samples_train'] = n_samples_train
        self.run_training_params['n_samples_val'] = n_samples_val
        self.run_training_params['n_samples_test'] = n_samples_test
        train_batch_size, val_batch_size, test_batch_size = self.__get_batch_sizes(train_batch_size, val_batch_size, test_batch_size)

        ## Define the optimizer
        optimizer = self.__initialize_optimizer(optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param)
        self.run_training_params['optimizer'] = optimizer.name
        self.run_training_params['optimizer_params'] = optimizer.optimizer_params
        self.run_training_params['optimizer_defaults'] = optimizer.optimizer_defaults
        self.optimizer = optimizer.get_torch_optimizer()

        ## Get num_of_epochs
        num_of_epochs = self.__get_parameter(num_of_epochs = num_of_epochs)

        ## Define the loss functions
        minimize_gain = self.__get_parameter(minimize_gain = minimize_gain)
        self.run_training_params['minimizers'] = {}
        for name, values in self.model_def['Minimizers'].items():
            self.loss_functions[name] = CustomLoss(values['loss'])
            self.run_training_params['minimizers'][name] = {}
            self.run_training_params['minimizers'][name]['A'] = values['A']
            self.run_training_params['minimizers'][name]['B'] = values['B']
            self.run_training_params['minimizers'][name]['loss'] = values['loss']
            if name in minimize_gain:
                self.run_training_params['minimizers'][name]['gain'] = minimize_gain[name]

        ## Clean the dict of the training parameters
        del self.run_training_params['minimize_gain']
        del self.run_training_params['lr']
        del self.run_training_params['lr_param']
        if not recurrent_train:
            del self.run_training_params['connect']
            del self.run_training_params['closed_loop']
            del self.run_training_params['step']
            del self.run_training_params['prediction_samples']
        if early_stopping is None:
            del self.run_training_params['early_stopping']
            del self.run_training_params['early_stopping_params']

        ## Create the train, validation and test loss dictionaries
        train_losses, val_losses, test_losses = {}, {}, {}
        for key in self.model_def['Minimizers'].keys():
            train_losses[key] = []
            if n_samples_val > 0:
                val_losses[key] = []

        ## Check that the needed keys are in the datasets
        keys = set(self.model_def['Inputs'].keys())
        keys |= {value['A'] for value in self.model_def['Minimizers'].values()} | {value['B'] for value in self.model_def['Minimizers'].values()}
        keys -= set(self.model_def['Relations'].keys())
        keys -= set(self.model_def['States'].keys())
        keys -= set(self.model_def['Outputs'].keys())
        if 'connect' in self.run_training_params:
            keys -= set(self.run_training_params['connect'].keys())
        if 'closed_loop' in self.run_training_params:
            keys -= set(self.run_training_params['closed_loop'].keys())
        check(set(keys).issubset(set(XY_train.keys())), KeyError, f"Not all the mandatory keys {keys} are present in the training dataset {set(XY_train.keys())}.")

        ## Evaluate the number of updates per epoch and the unused samples
        if recurrent_train:
            list_of_batch_indexes = range(0, (n_samples_train - train_batch_size - prediction_samples + 1), (train_batch_size + step - 1))
            check(n_samples_train - train_batch_size - prediction_samples + 1 > 0, ValueError,
                  f"The number of available samples is (n_samples_train ({n_samples_train}) - train_batch_size ({train_batch_size}) - prediction_samples ({prediction_samples}) + 1) = {n_samples_train - train_batch_size - prediction_samples + 1}.")
            update_per_epochs = (n_samples_train - train_batch_size - prediction_samples + 1)//(train_batch_size + step - 1) + 1
            unused_samples = n_samples_train - list_of_batch_indexes[-1] - train_batch_size - prediction_samples
        else:
            update_per_epochs = (n_samples_train - train_batch_size)//train_batch_size + 1
            unused_samples = n_samples_train - update_per_epochs * train_batch_size

        self.run_training_params['update_per_epochs'] = update_per_epochs
        self.run_training_params['unused_samples'] = unused_samples

        ## Select the model
        select_model = self.__get_parameter(select_model = select_model)
        select_model_params = self.__get_parameter(select_model_params = select_model_params)
        selected_model_def = ModelDef(self.model_def.json)

        ## Show the training parameters
        self.visualizer.showTrainParams()

        import time
        ## Start the train timer
        start = time.time()
        self.visualizer.showStartTraining()

        for epoch in range(num_of_epochs):
            ## TRAIN
            self.model.train()
            if recurrent_train:
                losses = self.__recurrentTrain(XY_train, n_samples_train, train_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=shuffle_data, train=True)
            else:
                losses = self.__Train(XY_train, n_samples_train, train_batch_size, minimize_gain, shuffle=shuffle_data, train=True)
            ## Save the losses
            for ind, key in enumerate(self.model_def['Minimizers'].keys()):
                train_losses[key].append(torch.mean(losses[ind]).tolist())

            if n_samples_val > 0:
                ## VALIDATION
                self.model.eval()
                if recurrent_train:
                    losses = self.__recurrentTrain(XY_val, n_samples_val, val_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=False, train=False)
                else:
                    losses = self.__Train(XY_val, n_samples_val, val_batch_size, minimize_gain, shuffle=False, train=False)
                ## Save the losses
                for ind, key in enumerate(self.model_def['Minimizers'].keys()):
                    val_losses[key].append(torch.mean(losses[ind]).tolist())

            ## Early stopping
            if callable(early_stopping):
                if early_stopping(train_losses, val_losses, early_stopping_params):
                    log.info(f'Stopping the training at epoch {epoch} due to early stopping.')
                    break

            if callable(select_model):
                if select_model(train_losses, val_losses, select_model_params):
                    best_model_epoch = epoch
                    selected_model_def.updateParameters(self.model)

            ## Visualize the training
            self.visualizer.showTraining(epoch, train_losses, val_losses)
            self.visualizer.showWeightsInTrain(epoch = epoch)

        ## Save the training time
        end = time.time()
        ## Visualize the training time
        for key in self.model_def['Minimizers'].keys():
            self.training[key] = {'train': train_losses[key]}
            if n_samples_val > 0:
                self.training[key]['val'] = val_losses[key]
        self.visualizer.showEndTraining(num_of_epochs-1, train_losses, val_losses)
        self.visualizer.showTrainingTime(end-start)

        ## Select the model
        if callable(select_model):
            log.info(f'Selected the model at the epoch {best_model_epoch+1}.')
            self.model = Model(selected_model_def)
        else:
            log.info('The selected model is the LAST model of the training.')

        self.resultAnalysis(train_dataset, XY_train, minimize_gain, closed_loop, connect, prediction_samples, step, train_batch_size)
        if self.run_training_params['n_samples_val'] > 0:
            self.resultAnalysis(validation_dataset, XY_val, minimize_gain, closed_loop, connect, prediction_samples, step, val_batch_size)
        if self.run_training_params['n_samples_test'] > 0:
            self.resultAnalysis(test_dataset, XY_test, minimize_gain, closed_loop, connect, prediction_samples, step, test_batch_size)

        self.visualizer.showResults()

        ## Get the trained model from torch and set the model_def
        self.model_def.updateParameters(self.model)
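
    # Illustrative sketch (not from the source): the keyword arguments are real
    # trainModel parameters; the values are only examples.
    #   >>> model.trainModel(splits=[70, 20, 10], num_of_epochs=50, lr=0.001,
    #   ...                  train_batch_size=32, optimizer='Adam')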

921
    def __recurrentTrain(self, data, n_samples, batch_size, loss_gains, closed_loop, connect, prediction_samples, step, shuffle=True, train=True):
1✔
922
        ## Sample Shuffle
923
        initial_value = 0 #random.randint(0, step - 1) if shuffle else 0
1✔
924

925
        n_available_samples = n_samples - batch_size - prediction_samples + 1
1✔
926
        check(n_available_samples > 0, ValueError, f"The number of available sample are (n_samples_train - train_batch_size - prediction_samples + 1) = {n_available_samples}.")
1✔
927
        list_of_batch_indexes = range(initial_value, n_available_samples, (batch_size + step - 1))
1✔
928

929
        ## Initialize the train losses vector
930
        aux_losses = torch.zeros([len(self.model_def['Minimizers']), len(list_of_batch_indexes)])
1✔
931

932
        json_inputs = self.model_def['Inputs'] | self.model_def['States']
1✔
933

934
        ## +1 means that n_samples = 1 - batch_size = 1 - prediction_samples = 1 + 1 = 0 # zero epochs
935
        ## +1 means that n_samples = 2 - batch_size = 1 - prediction_samples = 1 + 1 = 1 # one epochs
936
        for batch_val, idx in enumerate(list_of_batch_indexes):
1✔
937
            if train:
1✔
938
                self.optimizer.zero_grad() ## Reset the gradient
1✔
939

940
            ## Build the input tensor
941
            XY = {key: val[idx:idx+batch_size] for key, val in data.items()}
1✔
942
            # Add missing inputs
943
            for key in closed_loop:
1✔
944
                if key not in XY:
1✔
945
                    XY[key] = torch.zeros([batch_size, json_inputs[key]['ntot'], json_inputs[key]['dim']]).to(torch.float32)
1✔
946

947
            ## collect the horizon labels
948
            XY_horizon = {key: val[idx:idx+batch_size+prediction_samples] for key, val in data.items()}
1✔
949
            horizon_losses = {ind: [] for ind in range(len(self.model_def['Minimizers']))}
1✔
950

951
            ## Reset state variables with zeros or using inputs
952
            self.model.reset_states(XY, only = False)
1✔
953
            self.model.reset_connect_variables(connect, XY, only= False)
1✔
954

            for horizon_idx in range(prediction_samples + 1):
                out, minimize_out = self.model(XY)  ## Forward pass
                if self.log_internal:
                    self.__save_internal('inout_'+str(idx)+'_'+str(horizon_idx),{'XY':XY,'out':out,'state':self.model.states,'param':self.model.all_parameters,'connect':self.model.connect_variables})

                ## Loss Calculation
                for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
                    loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
                    loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
                    horizon_losses[ind].append(loss)

                ## remove the state variables from the data
                if prediction_samples > 1:
                    for state_key in self.model_def['States'].keys():
                        if state_key in XY.keys():
                            del XY[state_key]

                ## Update the input with the recurrent prediction
                if horizon_idx < prediction_samples:
                    for key in XY.keys():
                        if key in closed_loop.keys(): ## the input is recurrent
                            shift = out[closed_loop[key]].shape[1]  ## take the output time dimension
                            XY[key] = torch.roll(XY[key], shifts=-1, dims=1) ## Roll the time window
                            XY[key][:, -shift:, :] = out[closed_loop[key]] ## substitute with the predicted value
                        else: ## the input is not recurrent
                            XY[key] = torch.roll(XY[key], shifts=-1, dims=0)  ## Roll the sample window
                            XY[key][-1] = XY_horizon[key][batch_size+horizon_idx]  ## take the next sample from the dataset
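
                ## Illustrative sketch of the update above (hypothetical shapes): for a
                ## closed-loop input whose time window is [x1, x2, x3] and a model output y
                ## with time dimension 1 (shift = 1), the roll gives [x2, x3, x1] and the
                ## assignment overwrites the last shift positions, leaving [x2, x3, y];
                ## a non-recurrent input instead slides one position along the batch
                ## dimension and takes the next ground-truth sample from XY_horizon.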


            ## Calculate the total loss
            total_loss = 0
            for ind in range(len(self.model_def['Minimizers'])):
                loss = sum(horizon_losses[ind])/(prediction_samples+1)
                aux_losses[ind][batch_val] = loss.item()
                total_loss += loss

            ## Gradient Step
            if train:
                total_loss.backward() ## Backpropagate the error
                self.optimizer.step()
                self.visualizer.showWeightsInTrain(batch = batch_val)

        ## return the losses
        return aux_losses

    def __Train(self, data, n_samples, batch_size, loss_gains, shuffle=True, train=True):
        check((n_samples - batch_size + 1) > 0, ValueError,
              f"The number of available samples is (n_samples_train - train_batch_size + 1) = {n_samples - batch_size + 1}.")
        if shuffle:
            randomize = torch.randperm(n_samples)
            data = {key: val[randomize] for key, val in data.items()}
        ## Initialize the train losses vector
        aux_losses = torch.zeros([len(self.model_def['Minimizers']), n_samples//batch_size])
        for idx in range(0, (n_samples - batch_size + 1), batch_size):
            ## Build the input tensor
            XY = {key: val[idx:idx+batch_size] for key, val in data.items()}
            ## Reset gradient
            if train:
                self.optimizer.zero_grad()
            ## Model Forward
            _, minimize_out = self.model(XY)  ## Forward pass
            ## Loss Calculation
            total_loss = 0
            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
                loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
                loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
                aux_losses[ind][idx//batch_size] = loss.item()
                total_loss += loss
            ## Gradient step
            if train:
                total_loss.backward()
                self.optimizer.step()
                self.visualizer.showWeightsInTrain(batch = idx//batch_size)

        ## return the losses
        return aux_losses

    def resultAnalysis(self, dataset, data = None, minimize_gain = {}, closed_loop = {}, connect = {}, prediction_samples = None, step = 1, batch_size = None):
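        """
        Computes the performance metrics (loss, FVU and AIC) and stores the predictions of the model on the given dataset.

        Example (usage sketch; 'test_set' is a hypothetical name of a dataset loaded beforehand):
            >>> model.resultAnalysis('test_set')
        """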
        import warnings
        with torch.inference_mode():
            ## Init model for results analysis
            self.model.eval()
            self.performance[dataset] = {}
            self.prediction[dataset] = {}
            A = {}
            B = {}
            total_losses = {}

            # Create the losses
            losses = {}
            for name, values in self.model_def['Minimizers'].items():
                losses[name] = CustomLoss(values['loss'])

            recurrent = False
            if (closed_loop or connect or self.model_def['States']) and prediction_samples is not None:
                recurrent = True

            if data is None:
                check(dataset in self.data.keys(), ValueError, f'The dataset {dataset} is not loaded!')
                data = {key: torch.from_numpy(val).to(torch.float32) for key, val in self.data[dataset].items()}
            n_samples = len(data[list(data.keys())[0]])

            if recurrent:
                json_inputs = self.model_def['Inputs'] | self.model_def['States']
                batch_size = batch_size if batch_size is not None else n_samples - prediction_samples
                initial_value = 0

                for key, value in self.model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []
                    for horizon_idx in range(prediction_samples + 1):
                        A[key].append([])
                        B[key].append([])

                for idx in range(initial_value, (n_samples - batch_size - prediction_samples + 1), (batch_size + step - 1)):
                    ## Build the input tensor
                    XY = {key: val[idx:idx + batch_size] for key, val in data.items()}
                    # Add missing inputs
                    for key in closed_loop:
                        if key not in XY:
                            XY[key] = torch.zeros([batch_size, json_inputs[key]['ntot'], json_inputs[key]['dim']]).to(torch.float32)
                    ## collect the horizon labels
                    XY_horizon = {key: val[idx:idx + batch_size + prediction_samples] for key, val in data.items()}
                    horizon_losses = {key: [] for key in self.model_def['Minimizers'].keys()}

                    ## Reset state variables with zeros or using inputs
                    self.model.reset_states(XY, only=False)
                    self.model.reset_connect_variables(connect, XY, only=False)

                    for horizon_idx in range(prediction_samples + 1):
                        out, minimize_out = self.model(XY)  ## Forward pass

                        ## Loss Calculation
                        for key, value in self.model_def['Minimizers'].items():
                            A[key][horizon_idx].append(minimize_out[value['A']])
                            B[key][horizon_idx].append(minimize_out[value['B']])
                            loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
                            loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss  ## Multiply by the gain if necessary
                            horizon_losses[key].append(loss)

                        ## remove the state variables from the data
                        if prediction_samples > 1:
                            for state_key in self.model_def['States'].keys():
                                if state_key in XY.keys():
                                    del XY[state_key]

                        ## Update the input with the recurrent prediction
                        if horizon_idx < prediction_samples:
                            for key in XY.keys():
                                if key in closed_loop.keys():  ## the input is recurrent
                                    shift = out[closed_loop[key]].shape[1]  ## take the output time dimension
                                    XY[key] = torch.roll(XY[key], shifts=-1, dims=1)  ## Roll the time window
                                    XY[key][:, -shift:, :] = out[closed_loop[key]]  ## substitute with the predicted value
                                else:  ## the input is not recurrent
                                    XY[key] = torch.roll(XY[key], shifts=-1, dims=0)  ## Roll the sample window
                                    XY[key][-1] = XY_horizon[key][batch_size + horizon_idx]  ## take the next sample from the dataset

                    ## Calculate the total loss
                    for key in self.model_def['Minimizers'].keys():
                        loss = sum(horizon_losses[key]) / (prediction_samples + 1)
                        total_losses[key].append(loss.detach().numpy())

                for key, value in self.model_def['Minimizers'].items():
                    for horizon_idx in range(prediction_samples + 1):
                        A[key][horizon_idx] = np.concatenate(A[key][horizon_idx])
                        B[key][horizon_idx] = np.concatenate(B[key][horizon_idx])
                    total_losses[key] = np.mean(total_losses[key])

            else:
                if batch_size is None:
                    batch_size = n_samples

                for key, value in self.model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []

                for idx in range(0, (n_samples - batch_size + 1), batch_size):
                    ## Build the input tensor
                    XY = {key: val[idx:idx + batch_size] for key, val in data.items()}
                    if (closed_loop or connect or self.model_def['States']):
                        ## Reset state variables with zeros or using inputs
                        self.model.reset_states(XY, only=False)
                        self.model.reset_connect_variables(connect, XY, only=False)

                    ## Model Forward
                    _, minimize_out = self.model(XY)  ## Forward pass
                    ## Loss Calculation
                    for key, value in self.model_def['Minimizers'].items():
                        A[key].append(minimize_out[value['A']].numpy())
                        B[key].append(minimize_out[value['B']].numpy())
                        loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
                        loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss
                        total_losses[key].append(loss.detach().numpy())

                for key, value in self.model_def['Minimizers'].items():
                    A[key] = np.concatenate(A[key])
                    B[key] = np.concatenate(B[key])
                    total_losses[key] = np.mean(total_losses[key])

            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
                A_np = np.array(A[key])
                B_np = np.array(B[key])
                self.performance[dataset][key] = {}
                self.performance[dataset][key][value['loss']] = np.mean(total_losses[key]).item()
                self.performance[dataset][key]['fvu'] = {}
                # Compute FVU
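                # As computed below, fvu_A = Var(A - B) / Var(A) and
                # fvu_B = Var(A - B) / Var(B); 'total' is their mean. Values near
                # zero mean the error variance is small relative to the signal variance.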
                residual = A_np - B_np
                error_var = np.var(residual)
                error_mean = np.mean(residual)
                #error_var_manual = np.sum((residual-error_mean) ** 2) / (len(self.prediction['B'][ind]) - 0)
                #print(f"{key} var np:{new_error_var} and var manual:{error_var_manual}")
                with warnings.catch_warnings(record=True) as w:
                    self.performance[dataset][key]['fvu']['A'] = (error_var / np.var(A_np)).item()
                    self.performance[dataset][key]['fvu']['B'] = (error_var / np.var(B_np)).item()
                    if w and np.var(A_np) == 0.0 and np.var(B_np) == 0.0:
                        self.performance[dataset][key]['fvu']['A'] = np.nan
                        self.performance[dataset][key]['fvu']['B'] = np.nan
                self.performance[dataset][key]['fvu']['total'] = np.mean([self.performance[dataset][key]['fvu']['A'], self.performance[dataset][key]['fvu']['B']]).item()
                # Compute AIC
                #normal_dist = norm(0, error_var ** 0.5)
                #probability_of_residual = normal_dist.pdf(residual)
                #log_likelihood_first = sum(np.log(probability_of_residual))
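                # Closed-form Gaussian log-likelihood used below:
                # log L = -n/2*log(2*pi) - n/2*log(error_var) - sum(residual**2)/(2*error_var)
                # and AIC = -2*log L + 2*total_params.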
                p1 = -len(residual)/2.0*np.log(2*np.pi)
                with warnings.catch_warnings(record=True) as w:
                    p2 = -len(residual)/2.0*np.log(error_var)
                    p3 = -1 / (2.0 * error_var) * np.sum(residual ** 2)
                    if w and p2 == np.float32(np.inf) and p3 == np.float32(-np.inf):
                        p2 = p3 = 0.0
                log_likelihood = p1+p2+p3
                #print(f"{key} log likelihood second mode:{log_likelihood} = {p1}+{p2}+{p3} first mode: {log_likelihood_first}")
                total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) # TODO: check this count, the number appears doubled
                #print(f"{key} total_params:{total_params}")
                aic = - 2 * log_likelihood + 2 * total_params
                #print(f"{key} aic:{aic}")
                self.performance[dataset][key]['aic'] = {'value':aic,'total_params':total_params,'log_likelihood':log_likelihood}
                # Prediction and target
                self.prediction[dataset][key] = {}
                self.prediction[dataset][key]['A'] = A_np.tolist()
                self.prediction[dataset][key]['B'] = B_np.tolist()

            self.performance[dataset]['total'] = {}
            self.performance[dataset]['total']['mean_error'] = np.mean([value for key, value in total_losses.items()])
            self.performance[dataset]['total']['fvu'] = np.mean([self.performance[dataset][key]['fvu']['total'] for key in self.model_def['Minimizers'].keys()])
            self.performance[dataset]['total']['aic'] = np.mean([self.performance[dataset][key]['aic']['value'] for key in self.model_def['Minimizers'].keys()])

        self.visualizer.showResult(dataset)

    def getWorkspace(self):
        return self.exporter.getWorkspace()

    def saveTorchModel(self, name = 'net', model_folder = None, models = None):
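        """
        Saves the current torch model to a file through the exporter.

        Example (usage sketch; the folder name is hypothetical):
            >>> model.saveTorchModel(name = 'net', model_folder = 'models')
        """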
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet!')
        if models is not None:
            if name == 'net':
                name += '_' + '_'.join(models)
            model_def = ModelDef()
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
            model_def.updateParameters(self.model)
            model = Model(model_def.json)
        else:
            model = self.model
        self.exporter.saveTorchModel(model, name, model_folder)

    def loadTorchModel(self, name = 'net', model_folder = None):
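        """
        Loads the weights of a saved torch model into the current neuralized model.

        Example (usage sketch; the folder name is hypothetical):
            >>> model.loadTorchModel(name = 'net', model_folder = 'models')
        """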
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
        self.exporter.loadTorchModel(self.model, name, model_folder)

    def saveModel(self, name = 'net', model_path = None, models = None):
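        """
        Saves the model definition (the JSON description of the network) to a file.

        Example (usage sketch; the path is hypothetical):
            >>> model.saveModel(name = 'net', model_path = 'models')
        """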
        if models is not None:
            if name == 'net':
                name += '_' + '_'.join(models)
            model_def = ModelDef()
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
            model_def.updateParameters(self.model)
        else:
            model_def = self.model_def
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
        self.exporter.saveModel(model_def.json, name, model_path)

    def loadModel(self, name = None, model_folder = None):
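        """
        Loads a model definition from a file and replaces the current one; the network must be neuralized again before it can be used.

        Example (usage sketch; the folder name is hypothetical):
            >>> model.loadModel(name = 'net', model_folder = 'models')
        """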
        if name is None:
            name = 'net'
        model_def = self.exporter.loadModel(name, model_folder)
        check(model_def, RuntimeError, "Failed to load the network.")
        self.model_def = ModelDef(model_def)
        self.model = None
        self.neuralized = False
        self.traced = False

    def exportPythonModel(self, name = 'net', model_path = None, models = None):
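        """
        Exports the model definition together with a standalone Python version of the model; not available for traced models or networks with state variables.

        Example (usage sketch; the path is hypothetical):
            >>> model.exportPythonModel(name = 'net', model_path = 'export')
        """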
        if models is not None:
            if name == 'net':
                name += '_' + '_'.join(models)
            model_def = ModelDef()
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
            model_def.updateParameters(self.model)
            model = Model(model_def.json)
        else:
            model_def = self.model_def
            model = self.model
        check(model_def['States'] == {}, TypeError, "The network has state variables. The export to Python is not possible.")
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
        check(self.traced == False, RuntimeError,
              'The model is traced and cannot be exported to Python.\n Run neuralizeModel() to recreate a standard model.')
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
        self.exporter.saveModel(model_def.json, name, model_path)
        self.exporter.exportPythonModel(model_def, model, name, model_path)

    def importPythonModel(self, name = None, model_folder = None):
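        """
        Imports a Python model exported with exportPythonModel(); the resulting model is marked as traced.

        Example (usage sketch; the folder name is hypothetical):
            >>> model.importPythonModel(name = 'net', model_folder = 'export')
        """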
        if name is None:
            name = 'net'
        model_def = self.exporter.loadModel(name, model_folder)
        check(model_def is not None, RuntimeError, "Failed to load the network.")
        self.neuralizeModel(model_def=model_def)
        self.model = self.exporter.importPythonModel(name, model_folder)
        self.traced = True
        self.model_def.updateParameters(self.model)

    def exportONNX(self, inputs_order, outputs_order, models = None, name = 'net', model_folder = None):
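        """
        Exports the neuralized model in the ONNX format; the order of the inputs and outputs must be specified explicitly.

        Example (usage sketch; the input and output names are hypothetical):
            >>> model.exportONNX(inputs_order = ['x'], outputs_order = ['out'])
        """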
        check(self.model_def.isDefined(), RuntimeError, "The network has not been defined.")
        check(self.traced == False, RuntimeError, 'The model is traced and cannot be exported to ONNX.\n Run neuralizeModel() to recreate a standard model.')
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
        check(self.model_def.model_dict != {}, RuntimeError, 'The model was loaded and not created.')
        model_def = ModelDef()
        if models is not None:
            if name == 'net':
                name += '_' + '_'.join(models)
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
        else:
            model_def.update(model_dict = self.model_def.model_dict)
        model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
        model_def.updateParameters(self.model)
        model = Model(model_def.json)
        self.exporter.exportONNX(model_def, model, inputs_order, outputs_order, name, model_folder)

    def exportReport(self, name = 'net', model_folder = None):
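        """
        Generates and saves a report of the current network through the exporter.

        Example (usage sketch; the folder name is hypothetical):
            >>> model.exportReport(name = 'net', model_folder = 'reports')
        """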
        self.exporter.exportReport(self, name, model_folder)

## Lowercase alias of the Modely class, so the network can also be instantiated as nnodely()
nnodely = Modely