tonegas / nnodely / build 14319828903

07 Apr 2025 09:27PM UTC coverage: 97.259% (+0.2%) from 97.035%

Pull Request #86 (Smallclasses): merge 44b7c25ee into e9c323c4f

2275 of 2409 new or added lines in 54 files covered (94.44%).
1 existing line in 1 file is now uncovered.
11637 of 11965 relevant lines covered (97.26%).
0.97 hits per line.
Source File: /nnodely/operators/validator.py (96.64% covered)
The only uncovered lines are the new multi-file dataset branch of resultAnalysis.

import torch

import numpy as np

from nnodely.basic.loss import CustomLoss
from nnodely.support.utils import check, TORCH_DTYPE, enforce_types
from nnodely.operators.memory import Memory

class Validator(Memory):
    def __init__(self):
        check(type(self) is not Validator, TypeError, "Validator class cannot be instantiated directly")

        # Validation Parameters
        self._performance = {}
        self._prediction = {}
        self._training = {}

    @enforce_types
    def resultAnalysis(self,
                       dataset: str,
                       data: dict | None = None,
                       minimize_gain: dict = {},
                       closed_loop: dict = {},
                       connect: dict = {},
                       prediction_samples: int | str | None = None,
                       step: int = 0,
                       batch_size: int | None = None
                       ) -> None:
        import warnings
        json_inputs = self.json['Inputs'] | self.json['States']
        calculate_grad = False
        for key, value in json_inputs.items():
            if 'type' in value.keys():
                calculate_grad = True
                break
        with torch.enable_grad() if calculate_grad else torch.inference_mode():
            ## Init model for results analysis
            self._model.eval()
            self._performance[dataset] = {}
            self._prediction[dataset] = {}
            A = {}
            B = {}
            total_losses = {}

            # Create the losses
            losses = {}
            for name, values in self._model_def['Minimizers'].items():
                losses[name] = CustomLoss(values['loss'])

            recurrent = False
            if (closed_loop or connect or self._model_def['States']) and prediction_samples is not None:
                recurrent = True

            if data is None:
                check(dataset in self._data.keys(), ValueError, f'The dataset {dataset} is not loaded!')
                data = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self._data[dataset].items()}
            n_samples = len(data[list(data.keys())[0]])

            if recurrent:
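                ## Recurrent evaluation: each batch of start indexes is
                ## unrolled for prediction_samples + 1 steps, closed-loop and
                ## connected states are fed back between steps, and the loss
                ## of each minimizer is averaged over the horizon.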
                batch_size = batch_size if batch_size is not None else n_samples - prediction_samples

                model_inputs = list(self._model_def['Inputs'].keys())

                state_closed_loop = [key for key, value in self._model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
                state_connect = [key for key, value in self._model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())

                non_mandatory_inputs = state_closed_loop + state_connect
                mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))

                for key, value in self._model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []
                    for horizon_idx in range(prediction_samples + 1):
                        A[key].append([])
                        B[key].append([])

                list_of_batch_indexes = list(range(n_samples - prediction_samples))
                ## Remove forbidden indexes in case of a multi-file dataset
                if dataset in self._multifile.keys(): ## Multi-file Dataset
                    if n_samples == self.run_training_params['n_samples_train']: ## Training
                        list_of_batch_indexes, step = self.__get_batch_indexes(dataset, n_samples, prediction_samples, batch_size, step, type='train')
                    elif n_samples == self.run_training_params['n_samples_val']: ## Validation
                        list_of_batch_indexes, step = self.__get_batch_indexes(dataset, n_samples, prediction_samples, batch_size, step, type='val')
                    else:
                        list_of_batch_indexes, step = self.__get_batch_indexes(dataset, n_samples, prediction_samples, batch_size, step, type='test')

                X = {}
                ## Update with virtual states
                self._model.update(closed_loop=closed_loop, connect=connect)
                while len(list_of_batch_indexes) >= batch_size:
                    idxs = list_of_batch_indexes[:batch_size]
                    for num in idxs:
                        list_of_batch_indexes.remove(num)
                    if step > 0:
                        if len(list_of_batch_indexes) >= step:
                            step_idxs = list_of_batch_indexes[:step]
                            for num in step_idxs:
                                list_of_batch_indexes.remove(num)
                        else:
                            list_of_batch_indexes = []
                    ## Reset
                    horizon_losses = {key: [] for key in self._model_def['Minimizers'].keys()}
                    for key in non_mandatory_inputs:
                        if key in data.keys():
                            ## with data
                            X[key] = data[key][idxs]
                        else:  ## with zeros
                            window_size = self._input_n_samples[key]
                            dim = json_inputs[key]['dim']
                            if 'type' in json_inputs[key]:
                                X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=True)
                            else:
                                X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
                            self._states[key] = X[key]

                    for horizon_idx in range(prediction_samples + 1):
                        ## Get data
                        for key in mandatory_inputs:
                            X[key] = data[key][[idx+horizon_idx for idx in idxs]]
                        ## Forward pass
                        out, minimize_out, out_closed_loop, out_connect = self._model(X)

                        ## Loss Calculation
                        for key, value in self._model_def['Minimizers'].items():
                            A[key][horizon_idx].append(minimize_out[value['A']].detach().numpy())
                            B[key][horizon_idx].append(minimize_out[value['B']].detach().numpy())
                            loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
                            loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss  ## Multiply by the gain if necessary
                            horizon_losses[key].append(loss)

                        ## Update
                        self._updateState(X, out_closed_loop, out_connect)

                    ## Calculate the total loss
                    for key in self._model_def['Minimizers'].keys():
                        loss = sum(horizon_losses[key]) / (prediction_samples + 1)
                        total_losses[key].append(loss.detach().numpy())

                for key, value in self._model_def['Minimizers'].items():
                    for horizon_idx in range(prediction_samples + 1):
                        A[key][horizon_idx] = np.concatenate(A[key][horizon_idx])
                        B[key][horizon_idx] = np.concatenate(B[key][horizon_idx])
                    total_losses[key] = np.mean(total_losses[key])

            else:
                if batch_size is None:
                    batch_size = n_samples

                for key, value in self._model_def['Minimizers'].items():
                    total_losses[key], A[key], B[key] = [], [], []

                for idx in range(0, (n_samples - batch_size + 1), batch_size):
                    ## Build the input tensor
                    XY = {key: val[idx:idx + batch_size] for key, val in data.items()}

                    ## Model Forward
                    _, minimize_out, _, _ = self._model(XY)  ## Forward pass
                    ## Loss Calculation
                    for key, value in self._model_def['Minimizers'].items():
                        A[key].append(minimize_out[value['A']].detach().numpy())
                        B[key].append(minimize_out[value['B']].detach().numpy())
                        loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
                        loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss
                        total_losses[key].append(loss.detach().numpy())

                for key, value in self._model_def['Minimizers'].items():
                    A[key] = np.concatenate(A[key])
                    B[key] = np.concatenate(B[key])
                    total_losses[key] = np.mean(total_losses[key])

            for ind, (key, value) in enumerate(self._model_def['Minimizers'].items()):
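                ## Per-minimizer metrics: FVU = Var(A - B) / Var(A or B), and
                ## AIC = -2 * log_likelihood + 2 * total_params, where the
                ## log-likelihood assumes Gaussian residuals.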
                A_np = np.array(A[key])
                B_np = np.array(B[key])
                self._performance[dataset][key] = {}
                self._performance[dataset][key][value['loss']] = np.mean(total_losses[key]).item()
                self._performance[dataset][key]['fvu'] = {}
                # Compute FVU
                residual = A_np - B_np
                error_var = np.var(residual)
                error_mean = np.mean(residual)
                #error_var_manual = np.sum((residual-error_mean) ** 2) / (len(self._prediction['B'][ind]) - 0)
                #print(f"{key} var np:{new_error_var} and var manual:{error_var_manual}")
                with warnings.catch_warnings(record=True) as w:
                    self._performance[dataset][key]['fvu']['A'] = (error_var / np.var(A_np)).item()
                    self._performance[dataset][key]['fvu']['B'] = (error_var / np.var(B_np)).item()
                    if w and np.var(A_np) == 0.0 and np.var(B_np) == 0.0:
                        self._performance[dataset][key]['fvu']['A'] = np.nan
                        self._performance[dataset][key]['fvu']['B'] = np.nan
                self._performance[dataset][key]['fvu']['total'] = np.mean([self._performance[dataset][key]['fvu']['A'], self._performance[dataset][key]['fvu']['B']]).item()
                # Compute AIC
                #normal_dist = norm(0, error_var ** 0.5)
                #probability_of_residual = normal_dist.pdf(residual)
                #log_likelihood_first = sum(np.log(probability_of_residual))
                p1 = -len(residual)/2.0*np.log(2*np.pi)
                with warnings.catch_warnings(record=True) as w:
                    p2 = -len(residual)/2.0*np.log(error_var)
                    p3 = -1 / (2.0 * error_var) * np.sum(residual ** 2)
                    if w and p2 == np.float32(np.inf) and p3 == np.float32(-np.inf):
                        p2 = p3 = 0.0
                log_likelihood = p1+p2+p3
                #print(f"{key} log likelihood second mode:{log_likelihood} = {p1}+{p2}+{p3} first mode: {log_likelihood_first}")
                total_params = sum(p.numel() for p in self._model.parameters() if p.requires_grad)
                #print(f"{key} total_params:{total_params}")
                aic = - 2 * log_likelihood + 2 * total_params
                #print(f"{key} aic:{aic}")
                self._performance[dataset][key]['aic'] = {'value':aic,'total_params':total_params,'log_likelihood':log_likelihood}
                # Prediction and target
                self._prediction[dataset][key] = {}
                self._prediction[dataset][key]['A'] = A_np.tolist()
                self._prediction[dataset][key]['B'] = B_np.tolist()

            ## Remove virtual states
            self._removeVirtualStates(connect, closed_loop)

            self._performance[dataset]['total'] = {}
            self._performance[dataset]['total']['mean_error'] = np.mean([value for key, value in total_losses.items()])
            self._performance[dataset]['total']['fvu'] = np.mean([self._performance[dataset][key]['fvu']['total'] for key in self._model_def['Minimizers'].keys()])
            self._performance[dataset]['total']['aic'] = np.mean([self._performance[dataset][key]['aic']['value'] for key in self._model_def['Minimizers'].keys()])

        self.visualizer.showResult(dataset)
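
Because the constructor forbids direct instantiation, resultAnalysis is reached through a concrete model class that inherits these operators. A minimal usage sketch, assuming a hypothetical concrete instance `model` with a dataset already loaded under the name 'val' (both the instance and the loaded dataset are assumptions, not part of this file):

# Hypothetical sketch: `model` is assumed to be an instance of a concrete
# subclass that mixes Validator into the full framework; only the
# resultAnalysis call and the _performance layout come from the file above.
model.resultAnalysis('val',                  # name of a loaded dataset
                     prediction_samples=10,  # unroll horizon (used when states, closed loops, or connects are defined)
                     batch_size=32)

# resultAnalysis fills self._performance and self._prediction, then calls
# self.visualizer.showResult(dataset):
print(model._performance['val']['total']['mean_error'])
print(model._performance['val']['total']['fvu'])
print(model._performance['val']['total']['aic'])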
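
For reference, the FVU and AIC arithmetic above can be reproduced in isolation. A self-contained NumPy sketch with made-up targets, predictions, and parameter count (all illustrative, not taken from this report):

import numpy as np

# Standalone illustration of the metrics computed in resultAnalysis
# (not nnodely code; A, B, and total_params are made up).
A = np.array([1.0, 2.0, 3.0, 4.0, 5.0])   # targets ("A" stream)
B = np.array([1.1, 1.9, 3.2, 3.8, 5.1])   # predictions ("B" stream)
residual = A - B

# Fraction of variance unexplained: 0 means perfect, 1 means nothing explained.
error_var = np.var(residual)
fvu = error_var / np.var(A)

# AIC from the Gaussian log-likelihood of the residuals, as in the method.
n = len(residual)
log_likelihood = (-n / 2.0 * np.log(2 * np.pi)
                  - n / 2.0 * np.log(error_var)
                  - np.sum(residual ** 2) / (2.0 * error_var))
total_params = 3                           # stand-in for the trainable parameter count
aic = -2 * log_likelihood + 2 * total_params

print(f"FVU: {fvu:.4f}, AIC: {aic:.2f}")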