
tonegas / nnodely, build 16502811447

24 Jul 2025 04:44PM UTC coverage: 97.767% (+0.1%) from 97.651%

Push via GitHub (committer: web-flow): **New version 1.5.0**

This pull request introduces version 1.5.0 of **nnodely**, featuring several updates:
1. Improved clarity of documentation and examples.
2. Support for managing multiple datasets is now available.
3. DataFrames can now be used to create datasets.
4. Datasets can now be resampled.
5. Random data training has been fixed for both classic and recurrent training.
6. The `state` variable has been removed.
7. It is now possible to add or remove a connection or a closed loop.
8. Partial models can now be exported.
9. The `train` function and the result analysis have been separated.
10. A new function, `trainAndAnalyse`, is now available (see the sketch after this list).
11. The report now works across all network types.
12. The training function code has been reorganized.
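
Items 9 and 10 change the typical workflow, so here is a minimal sketch of the two entry points. It assumes an already-built nnodely model object (shown simply as `model`) whose `resultAnalysis` signature matches the `validator.py` listing below; the `train` and `trainAndAnalyse` argument names are illustrative assumptions, not the library's documented API.

```python
# Sketch only: `model` is an already-defined nnodely model with datasets
# loaded under the names 'train' and 'val'; training argument names are assumptions.

# Option A (split introduced in 1.5.0): fit first, analyze separately.
model.train(train_dataset='train')                   # assumed argument name
model.resultAnalysis('val', name='validation')       # signature shown in validator.py below
print(model.performance['validation'])               # read-only dict of metrics per minimizer

# Option B: one call that trains and then runs the analysis.
model.trainAndAnalyse(train_dataset='train', validation_dataset='val')  # assumed argument names
```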

2901 of 2967 new or added lines in 53 files covered. (97.78%)

16 existing lines in 6 files now uncovered.

12652 of 12941 relevant lines covered (97.77%)

0.98 hits per line

Source File: /nnodely/operators/validator.py (99.0% covered)
```python
import torch, warnings
import numpy as np

from nnodely.support.utils import ReadOnlyDict, get_batch_size

from nnodely.basic.loss import CustomLoss
from nnodely.operators.network import Network
from nnodely.support.utils import check, TORCH_DTYPE, enforce_types

class Validator(Network):
    @enforce_types
    def __init__(self):
        check(type(self) is not Validator, TypeError, "Validator class cannot be instantiated directly")
        super().__init__()

        # Validation Parameters
        self.__performance = {}
        self.__prediction = {}

    @property
    def performance(self):
        return ReadOnlyDict(self.__performance)

    @property
    def prediction(self):
        return ReadOnlyDict(self.__prediction)

    @enforce_types
    def resultAnalysis(self,
                       dataset: str | list | dict, *,
                       name: str | None = None,
                       minimize_gain: dict = {},
                       closed_loop: dict = {},
                       connect: dict = {},
                       prediction_samples: int | str = -1, #TODO uniform to training set to 0
                       step: int = 0,
                       batch_size: int | None = None
                       ) -> None:
        """
        Analyzes the performance of the model on the provided dataset.

        Parameters
        ----------
        dataset : str | list | dict
            Dataset on which to analyze the performance of the model.
        name : str or None
            Label to be used in the plots. If None, a tag is derived from the dataset.
        minimize_gain : dict
            A dictionary specifying the gain for each minimization loss function.
        closed_loop : dict, optional
            A dictionary specifying closed-loop connections. The keys are input names and the values are output names. Default is {}.
        connect : dict, optional
            A dictionary specifying connections. The keys are input names and the values are output names. Default is {}.
        step : int, optional
            The step size used to analyze the model on the provided dataset. A larger value uses less data and makes the analysis faster. Default is 0.
        prediction_samples : int, optional
            The size of the prediction horizon, i.e. the number of samples in each recurrent window. Default is -1.
        batch_size : int or None, optional
            The batch size used to analyze the performance of the model on the provided dataset. Default is None.
        """

        with torch.enable_grad() if self._get_gradient_on_inference() else torch.inference_mode():
            ## Init model for results analysis
            if name is None:
                dataset_tag = self._get_tag(dataset)
            else:
                dataset_tag = name

            self._model.eval()
            self.__performance[dataset_tag] = {}
            self.__prediction[dataset_tag] = {}
            # A and B collect the two compared signals (prediction and target) per minimizer
            A = {}
            B = {}
            total_losses = {}

            # Create the losses
            losses = {}
            for name, values in self._model_def['Minimizers'].items():
                losses[name] = CustomLoss(values['loss'])

            data = self._get_data(dataset)
            n_samples = len(data[list(data.keys())[0]])

            batch_size = get_batch_size(n_samples, batch_size, prediction_samples)
            prediction_samples = self._setup_recurrent_variables(prediction_samples, closed_loop, connect)

            if prediction_samples >= 0:
                mandatory_inputs, non_mandatory_inputs = self._get_mandatory_inputs(connect, closed_loop)

                for key, value in self._model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []
                    for horizon_idx in range(prediction_samples + 1):
                        A[key].append([])
                        B[key].append([])
                if type(dataset) is not dict and dataset in self._multifile.keys(): ## Multi-file Dataset
                    # New line; the only line of this file not covered by tests in this report
                    batch_indexes = self._get_batch_indexes(dataset, prediction_samples)
                else:
                    batch_indexes = list(range(n_samples - prediction_samples))

                ## Update with virtual states
                self._model.update(closed_loop=closed_loop, connect=connect)
                self._recurrent_inference(data, batch_indexes, batch_size, minimize_gain, prediction_samples,
                                          step, non_mandatory_inputs, mandatory_inputs, losses,
                                          total_losses = total_losses, A = A, B = B)

                for key, value in self._model_def['Minimizers'].items():
                    for horizon_idx in range(prediction_samples + 1):
                        if A is not None:
                            A[key][horizon_idx] = np.concatenate(A[key][horizon_idx])
                        if B is not None:
                            B[key][horizon_idx] = np.concatenate(B[key][horizon_idx])
                    if total_losses is not None:
                        total_losses[key] = np.mean(total_losses[key])
            else:
                for key, value in self._model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []

                self._model.update(disconnect=True)
                self._inference(data, n_samples, batch_size, minimize_gain, losses,
                                total_losses = total_losses, A = A, B = B)

                for key, value in self._model_def['Minimizers'].items():
                    A[key] = np.concatenate(A[key])
                    B[key] = np.concatenate(B[key])
                    total_losses[key] = np.mean(total_losses[key])

            for ind, (key, value) in enumerate(self._model_def['Minimizers'].items()):
                A_np = np.array(A[key])
                B_np = np.array(B[key])
                self.__performance[dataset_tag][key] = {}
                self.__performance[dataset_tag][key][value['loss']] = np.mean(total_losses[key]).item()
                self.__performance[dataset_tag][key]['fvu'] = {}
                # Compute FVU (fraction of variance unexplained)
                residual = A_np - B_np
                error_var = np.var(residual)
                error_mean = np.mean(residual)
                #error_var_manual = np.sum((residual-error_mean) ** 2) / (len(self.__prediction['B'][ind]) - 0)
                #print(f"{key} var np:{new_error_var} and var manual:{error_var_manual}")
                with warnings.catch_warnings(record=True) as w:
                    self.__performance[dataset_tag][key]['fvu']['A'] = (error_var / np.var(A_np)).item()
                    self.__performance[dataset_tag][key]['fvu']['B'] = (error_var / np.var(B_np)).item()
                    if w and np.var(A_np) == 0.0 and np.var(B_np) == 0.0:
                        self.__performance[dataset_tag][key]['fvu']['A'] = np.nan
                        self.__performance[dataset_tag][key]['fvu']['B'] = np.nan
                self.__performance[dataset_tag][key]['fvu']['total'] = np.mean([self.__performance[dataset_tag][key]['fvu']['A'], self.__performance[dataset_tag][key]['fvu']['B']]).item()
                # Compute AIC from the Gaussian log-likelihood of the residuals
                #normal_dist = norm(0, error_var ** 0.5)
                #probability_of_residual = normal_dist.pdf(residual)
                #log_likelihood_first = sum(np.log(probability_of_residual))
                p1 = -len(residual)/2.0*np.log(2*np.pi)
                with warnings.catch_warnings(record=True) as w:
                    p2 = -len(residual)/2.0*np.log(error_var)
                    p3 = -1 / (2.0 * error_var) * np.sum(residual ** 2)
                    if w and p2 == np.float32(np.inf) and p3 == np.float32(-np.inf):
                        p2 = p3 = 0.0
                log_likelihood = p1+p2+p3
                #print(f"{key} log likelihood second mode:{log_likelihood} = {p1}+{p2}+{p3} first mode: {log_likelihood_first}")
                total_params = sum(p.numel() for p in self._model.parameters() if p.requires_grad)
                #print(f"{key} total_params:{total_params}")
                aic = - 2 * log_likelihood + 2 * total_params
                #print(f"{key} aic:{aic}")
                self.__performance[dataset_tag][key]['aic'] = {'value':aic, 'total_params':total_params, 'log_likelihood':log_likelihood}
                # Prediction and target
                self.__prediction[dataset_tag][key] = {}
                self.__prediction[dataset_tag][key]['A'] = A_np.tolist()
                self.__prediction[dataset_tag][key]['B'] = B_np.tolist()

            self.__performance[dataset_tag]['total'] = {}
            self.__performance[dataset_tag]['total']['mean_error'] = np.mean([value for key, value in total_losses.items()])
            self.__performance[dataset_tag]['total']['fvu'] = np.mean([self.__performance[dataset_tag][key]['fvu']['total'] for key in self._model_def['Minimizers'].keys()])
            self.__performance[dataset_tag]['total']['aic'] = np.mean([self.__performance[dataset_tag][key]['aic']['value'] for key in self._model_def['Minimizers'].keys()])

        self.visualizer.showResult(dataset_tag)
```
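
As a reading aid for the metrics computed above: with residual r = A - B, n = `len(residual)`, variance estimate σ̂² = `error_var`, and k = `total_params`, the FVU and AIC blocks in `resultAnalysis` correspond to the following expressions.

```latex
% Fraction of variance unexplained, averaged over the two compared signals
\mathrm{FVU}_A = \frac{\operatorname{Var}(A-B)}{\operatorname{Var}(A)}, \qquad
\mathrm{FVU}_B = \frac{\operatorname{Var}(A-B)}{\operatorname{Var}(B)}, \qquad
\mathrm{FVU}_{\mathrm{total}} = \tfrac{1}{2}\bigl(\mathrm{FVU}_A + \mathrm{FVU}_B\bigr)

% Gaussian log-likelihood of the residuals (terms p1, p2, p3 in the code) and AIC
\log L = -\tfrac{n}{2}\log(2\pi) \;-\; \tfrac{n}{2}\log\hat\sigma^{2} \;-\; \frac{1}{2\hat\sigma^{2}}\sum_{i=1}^{n} r_i^{2},
\qquad \mathrm{AIC} = -2\log L + 2k
```

When both signals have zero variance the FVU entries are set to NaN, and a zero residual variance zeroes the p2 and p3 terms, matching the `warnings.catch_warnings` guards in the code.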
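
For completeness, a minimal sketch of how the results stored by `resultAnalysis` could be read back through the `performance` and `prediction` properties; `model` and the minimizer name `'error'` are hypothetical placeholders, while the dictionary keys mirror what the listing above writes.

```python
# Hypothetical: `model` inherits Validator and resultAnalysis was called with name='validation';
# 'error' stands for one of the model's minimizer names.
perf = model.performance['validation']
print(perf['error']['fvu']['total'])      # FVU averaged over the A and B signals
print(perf['error']['aic']['value'])      # AIC built from the Gaussian log-likelihood
print(perf['total']['mean_error'])        # mean loss over all minimizers

pred = model.prediction['validation']
a_signal = pred['error']['A']             # list: first compared signal
b_signal = pred['error']['B']             # list: second compared signal
```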