• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tonegas / nnodely / 12162489906

04 Dec 2024 03:04PM UTC coverage: 93.934% (-0.2%) from 94.155%
12162489906

push

github

web-flow
Merge pull request #6 from tonegas/release/0.15.0

Release/0.15.0

37 of 42 new or added lines in 8 files covered. (88.1%)

59 existing lines in 7 files now uncovered.

8625 of 9182 relevant lines covered (93.93%)

0.94 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.22
/tests/test_parameters_of_train.py
1
import unittest, os, sys

import numpy as np

from nnodely import *
from nnodely import relation

# Disable nnodely's relation-name checking for these tests.
relation.CHECK_NAMES = False

from nnodely.logger import logging, nnLogger

# Silence the nnodely logger during the test run: only CRITICAL messages pass.
log = nnLogger(__name__, logging.CRITICAL)
log.setAllLevel(logging.CRITICAL)

sys.path.append(os.getcwd())

# 13 Tests
# Test the train parameter and the optimizer options

# Folder containing the tab-separated data files used by the file-based tests.
data_folder = os.path.join(os.path.dirname(__file__), 'data/')
18

19
def funIn(x, w):
    """Return x scaled by w (element-wise for numpy arrays)."""
    scaled = w * x
    return scaled
21

22
def funOut(x, w):
    """Return x divided by w (the inverse scaling of funIn)."""
    ratio = x / w
    return ratio
24

25
def linear_fun(x,a,b):
    """Affine map a*x + b, used to generate synthetic target data."""
    return b + a * x
27

28
class ModelyTrainingTestParameter(unittest.TestCase):
1✔
29
    def TestAlmostEqual(self, data1, data2, precision=4):
1✔
30
        assert np.asarray(data1, dtype=np.float32).ndim == np.asarray(data2, dtype=np.float32).ndim, f'Inputs must have the same dimension! Received {type(data1)} and {type(data2)}'
×
31
        if type(data1) == type(data2) == list:
×
NEW
32
            self.assertEqual(len(data1), len(data2))
×
33
            for pred, label in zip(data1, data2):
×
34
                self.TestAlmostEqual(pred, label, precision=precision)
×
35
        else:
36
            self.assertAlmostEqual(data1, data2, places=precision)
×
37

38
    def test_network_mass_spring_damper(self):
        """Train on the file dataset with splits=[80,10,10] and no explicit
        training_params, then verify the sample counts, batch sizes, epochs
        and learning rate recorded in run_training_params."""
        x = Input('x')  # Position
        F = Input('F')  # Force

        # List the output of the model
        x_z = Output('x_z', Fir(x.tw(0.3)) + Fir(F.last()))

        # Add the neural model to the nnodely structure and neuralization of the model
        test = Modely(visualizer=None)
        test.addModel('x_z',x_z)
        test.addMinimize('next-pos', x.z(-1), x_z, 'mse')

        # Create the neural network
        test.neuralizeModel(sample_time=0.05)  # The sampling time depends on the dataset

        # Data load
        data_struct = ['x','F','x2','y2','','A1x','A1y','B1x','B1y','','A2x','A2y','B2x','out','','x3','in1','in2','time']
        test.loadData(name='dataset', source=data_folder, format=data_struct, skiplines=4, delimiter='\t', header=None)
        test.trainModel(splits=[80,10,10])

        # 15 data rows minus a 6-row window -> 9 usable samples, split 80/10/10.
        self.assertEqual((15-6), test.num_of_samples['dataset'])
        self.assertEqual(round((15-6)*80/100),test.run_training_params['n_samples_train'])
        self.assertEqual(round((15-6)*10/100),test.run_training_params['n_samples_val'])
        self.assertEqual(round((15-6)*10/100),test.run_training_params['n_samples_test'])
        self.assertEqual(round((15-6)*80/100),test.run_training_params['train_batch_size'])
        self.assertEqual(1, test.run_training_params['update_per_epochs'])
        self.assertEqual(0, test.run_training_params['unused_samples'])
        self.assertEqual(1,test.run_training_params['val_batch_size'])
        self.assertEqual(1,test.run_training_params['test_batch_size'])
        # No training_params were passed: 100 epochs and lr=0.001 are the
        # values trainModel records on its own.
        self.assertEqual(100,test.run_training_params['num_of_epochs'])
        self.assertEqual(0.001,test.run_training_params['optimizer_defaults']['lr'])
69

70
    def test_build_dataset_batch_connect(self):
        """Closed-loop training ('in1' fed back from output 'y') with
        prediction_samples=5 and step=1: verify the split sizes, batch sizes
        and the update-per-epoch / unused-sample bookkeeping."""
        data_x = np.random.rand(500) * 20 - 10
        data_a = 2
        data_b = -3
        dataset = {'in1': data_x, 'out': linear_fun(data_x, data_a, data_b)}

        input1 = Input('in1')
        out = Input('out')
        rel1 = Fir(input1.tw(0.05))
        y = Output('y', rel1)

        test = Modely(visualizer=None, seed=42)
        test.addModel('y',y)
        test.addMinimize('pos', out.next(), y)
        test.neuralizeModel(0.01)

        test.loadData(name='dataset',source=dataset)

        training_params = {}
        training_params['train_batch_size'] = 4
        training_params['val_batch_size'] = 4
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[70,20,10], closed_loop={'in1':'y'}, prediction_samples=5, step=1, training_params = training_params)

        self.assertEqual(346,test.run_training_params['n_samples_train']) ## ((500 - 5) * 0.7)  = 346
        self.assertEqual(99,test.run_training_params['n_samples_val']) ## ((500 - 5) * 0.2)  = 99
        self.assertEqual(50,test.run_training_params['n_samples_test']) ## ((500 - 5) * 0.1)  = 50
        self.assertEqual(495,test.num_of_samples['dataset'])
        self.assertEqual(4,test.run_training_params['train_batch_size'])
        self.assertEqual(4,test.run_training_params['val_batch_size'])
        self.assertEqual(1,test.run_training_params['test_batch_size'])
        self.assertEqual(5,test.run_training_params['num_of_epochs'])
        self.assertEqual(5, test.run_training_params['prediction_samples'])
        self.assertEqual(1, test.run_training_params['step'])
        self.assertEqual({'in1':'y'}, test.run_training_params['closed_loop'])
        self.assertEqual(0.1,test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping the same way and compare it to
        # what trainModel recorded.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        prediction_samples = test.run_training_params['prediction_samples']
        step = test.run_training_params['step']
        list_of_batch_indexes = range(0, n_samples - batch_size - prediction_samples + 1, (batch_size + step - 1))
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        # unused_samples formula: n_samples - list_of_batch_indexes[-1] - batch_size - prediction_samples
        self.assertEqual(1, test.run_training_params['unused_samples'])
117

118
    def test_recurrent_train_closed_loop(self):
        """Closed-loop training with splits=[100,0,0]: every sample goes to
        training, so validation/test sample counts and batch sizes must be 0."""
        data_x = np.random.rand(500) * 20 - 10
        data_a = 2
        data_b = -3
        dataset = {'in1': data_x, 'out': linear_fun(data_x, data_a, data_b)}

        x = Input('in1')
        # Fir with an explicitly initialized 1x1 parameter.
        p = Parameter('p', dimensions=1, sw=1, values=[[1.0]])
        fir = Fir(parameter=p)(x.last())
        out = Output('out', fir)

        test = Modely(visualizer=None, seed=42)
        test.addModel('out',out)
        test.addMinimize('pos', x.next(), out)
        test.neuralizeModel(0.01)

        test.loadData(name='dataset',source=dataset)

        training_params = {}
        training_params['train_batch_size'] = 4
        training_params['val_batch_size'] = 4
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 50

        test.trainModel(splits=[100,0,0], closed_loop={'in1':'out'}, prediction_samples=3, step=1, training_params = training_params)

        self.assertEqual((len(data_x)-1)*100/100,test.run_training_params['n_samples_train']) ## ((500 - 1) * 1)  = 499
        self.assertEqual(0,test.run_training_params['n_samples_val']) ## ((500 - 1) * 0)  = 0
        self.assertEqual(0,test.run_training_params['n_samples_test']) ## ((500 - 1) * 0)  = 0
        self.assertEqual((len(data_x)-1)*100/100,test.num_of_samples['dataset'])
        self.assertEqual(4,test.run_training_params['train_batch_size'])
        # Empty partitions get a zero batch size.
        self.assertEqual(0,test.run_training_params['val_batch_size'])
        self.assertEqual(0,test.run_training_params['test_batch_size'])
        self.assertEqual(50,test.run_training_params['num_of_epochs'])
        self.assertEqual(3, test.run_training_params['prediction_samples'])
        self.assertEqual(1, test.run_training_params['step'])
        self.assertEqual({'in1':'out'}, test.run_training_params['closed_loop'])
        self.assertEqual(0.1,test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        prediction_samples = test.run_training_params['prediction_samples']
        step = test.run_training_params['step']
        list_of_batch_indexes = range(0, n_samples - batch_size - prediction_samples + 1, (batch_size + step - 1))
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size - prediction_samples, test.run_training_params['unused_samples'])
165

166
    def test_recurrent_train_single_close_loop(self):
        """Single closed loop ('x' fed back from 'out') with step=3:
        verify the recorded training parameters and batch bookkeeping."""
        data_x = np.array(list(range(1, 101, 1)), dtype=np.float32)
        dataset = {'x': data_x, 'y': 2 * data_x}

        x = Input('x')
        y = Input('y')
        out = Output('out', Fir(x.last()))

        test = Modely(visualizer=None, seed=42)
        test.addModel('out', out)
        test.addMinimize('pos', y.last(), out)
        test.neuralizeModel(0.01)

        test.loadData(name='dataset', source=dataset)

        training_params = {}
        training_params['train_batch_size'] = 4
        training_params['val_batch_size'] = 4
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.01
        training_params['num_of_epochs'] = 50
        test.trainModel(splits=[80, 20, 0], closed_loop={'x': 'out'}, prediction_samples=3, step=3,
                        training_params=training_params)

        # 100 samples are available (only last() windows are used): 80/20/0 split.
        self.assertEqual(round((len(data_x) - 0) * 80 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual((len(data_x) - 0) * 20 / 100, test.run_training_params['n_samples_val'])
        self.assertEqual(0, test.run_training_params['n_samples_test'])
        self.assertEqual((len(data_x) - 0) * 100 / 100, test.num_of_samples['dataset'])
        self.assertEqual(4, test.run_training_params['train_batch_size'])
        self.assertEqual(4, test.run_training_params['val_batch_size'])
        self.assertEqual(0, test.run_training_params['test_batch_size'])
        self.assertEqual(50, test.run_training_params['num_of_epochs'])
        self.assertEqual(3, test.run_training_params['prediction_samples'])
        self.assertEqual(3, test.run_training_params['step'])
        self.assertEqual({'x': 'out'}, test.run_training_params['closed_loop'])
        self.assertEqual(0.01, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        prediction_samples = test.run_training_params['prediction_samples']
        step = test.run_training_params['step']
        list_of_batch_indexes = range(0, n_samples - batch_size - prediction_samples + 1, (batch_size + step - 1))
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size - prediction_samples, test.run_training_params['unused_samples'])
210

211
    def test_recurrent_train_multiple_close_loop(self):
        """Two closed loops trained together ('x' <- 'out_x', 'y' <- 'out_y'):
        verify the recorded training parameters and batch bookkeeping."""
        data_x = np.array(list(range(1, 101, 1)), dtype=np.float32)
        dataset = {'x': data_x, 'y': 2 * data_x}

        x = Input('x')
        y = Input('y')
        out_x = Output('out_x', Fir(x.last()))
        out_y = Output('out_y', Fir(y.last()))

        test = Modely(visualizer=None, seed=42)
        test.addModel('out_x', out_x)
        test.addModel('out_y', out_y)
        test.addMinimize('pos_x', x.next(), out_x)
        test.addMinimize('pos_y', y.next(), out_y)
        test.neuralizeModel(0.01)

        test.loadData(name='dataset', source=dataset)

        training_params = {}
        training_params['train_batch_size'] = 4
        training_params['val_batch_size'] = 4
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.01
        training_params['num_of_epochs'] = 32

        test.trainModel(splits=[80, 20, 0], closed_loop={'x': 'out_x', 'y': 'out_y'}, prediction_samples=3, step=1,
                        training_params=training_params)

        # next() consumes one sample, leaving 99, split 80/20/0.
        self.assertEqual(round((len(data_x) - 1) * 80 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual(round((len(data_x) - 1) * 20 / 100), test.run_training_params['n_samples_val'])
        self.assertEqual(0, test.run_training_params['n_samples_test'])
        self.assertEqual((len(data_x) - 1) * 100 / 100, test.num_of_samples['dataset'])
        self.assertEqual(4, test.run_training_params['train_batch_size'])
        self.assertEqual(4, test.run_training_params['val_batch_size'])
        self.assertEqual(0, test.run_training_params['test_batch_size'])
        self.assertEqual(32, test.run_training_params['num_of_epochs'])
        self.assertEqual(3, test.run_training_params['prediction_samples'])
        self.assertEqual(1, test.run_training_params['step'])
        self.assertEqual({'x': 'out_x', 'y': 'out_y'}, test.run_training_params['closed_loop'])
        self.assertEqual(0.01, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        prediction_samples = test.run_training_params['prediction_samples']
        step = test.run_training_params['step']
        list_of_batch_indexes = range(0, n_samples - batch_size - prediction_samples + 1, (batch_size + step - 1))
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size - prediction_samples, test.run_training_params['unused_samples'])

        # print('test before train: ', test(inputs={'x': [100, 101, 102, 103, 104], 'y': [200, 202, 204, 206, 208]}))
        # print('test after train: ', test(inputs={'x': [100, 101, 102, 103, 104], 'y': [200, 202, 204, 206, 208]}))
262

263
    def test_build_dataset_batch(self):
        """batch_size=1 on the 10-sample file dataset with splits=[70,20,10]:
        every partition keeps batch size 1 and all samples are used."""
        input1 = Input('in1')
        output = Input('out')
        rel1 = Fir(input1.tw(0.05))

        test = Modely(visualizer=None)
        test.addMinimize('out', output.z(-1), rel1)
        test.neuralizeModel(0.01)

        data_struct = ['x','F','x2','y2','','A1x','A1y','B1x','B1y','','A2x','A2y','B2x','out','','x3','in1','in2','time']
        test.loadData(name='dataset', source=data_folder, format=data_struct, skiplines=4, delimiter='\t', header=None)
        self.assertEqual((10,5,1),test.data['dataset']['in1'].shape)

        training_params = {}
        training_params['train_batch_size'] = 1
        training_params['val_batch_size'] = 1
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[70,20,10],training_params = training_params)

        # 15 lines in the dataset
        # 5 lines for input + 1 for output -> total of sample 10
        # 10 / 1 * 0.7 = 7 for training
        # 10 / 1 * 0.2 = 2 for validation
        # 10 / 1 * 0.1 = 1 for test

        self.assertEqual(7,test.run_training_params['n_samples_train'])
        self.assertEqual(2,test.run_training_params['n_samples_val'])
        self.assertEqual(1,test.run_training_params['n_samples_test'])
        self.assertEqual(10,test.num_of_samples['dataset'])
        self.assertEqual(1,test.run_training_params['train_batch_size'])
        self.assertEqual(1,test.run_training_params['val_batch_size'])
        self.assertEqual(1,test.run_training_params['test_batch_size'])
        self.assertEqual(5,test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1,test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping (no recurrence here).
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
305

306
    def test_build_dataset_batch2(self):
        """Requested batch sizes (25) larger than the available samples:
        the assertions below expect each batch size to fall back to its
        partition size (5 train / 0 val / 5 test)."""
        input1 = Input('in1')
        output = Input('out')
        rel1 = Fir(input1.tw(0.05))

        test = Modely(visualizer=None)
        test.addMinimize('out', output.z(-1), rel1)
        test.neuralizeModel(0.01)

        data_struct = ['x','F','x2','y2','','A1x','A1y','B1x','B1y','','A2x','A2y','B2x','out','','x3','in1','in2','time']
        test.loadData(name='dataset',source=data_folder, format=data_struct, skiplines=4, delimiter='\t', header=None)
        self.assertEqual((10,5,1),test.data['dataset']['in1'].shape)

        training_params = {}
        training_params['train_batch_size'] = 25
        training_params['val_batch_size'] = 25
        training_params['test_batch_size'] = 25
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[50,0,50],training_params = training_params)

        # 15 lines in the dataset
        # 5 lines for input + 1 for output -> total of sample 10
        # batch_size (25) exceeds each partition -> batch size capped to the partition size
        # 10 / 1 * 0.5 = 5 for training
        # 10 / 1 * 0.0 = 0 for validation
        # 10 / 1 * 0.5 = 5 for test
        self.assertEqual((15 - 5), test.num_of_samples['dataset'])
        self.assertEqual(round((15 - 5) * 50 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual(round((15 - 5) * 0 / 100), test.run_training_params['n_samples_val'])
        self.assertEqual(round((15 - 5) * 50 / 100), test.run_training_params['n_samples_test'])
        self.assertEqual(round((15 - 5) * 50 / 100), test.run_training_params['train_batch_size'])
        self.assertEqual(0, test.run_training_params['val_batch_size'])
        self.assertEqual(round((15 - 5) * 50 / 100), test.run_training_params['test_batch_size'])
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, (batch_size - 1))
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
348

349
    def test_build_dataset_batch3(self):
        """batch_size=2 with splits=[40,30,30] on the 10-sample file dataset:
        the requested batch size fits every partition and is kept."""
        input1 = Input('in1')
        output = Input('out')
        rel1 = Fir(input1.tw(0.05))

        test = Modely(visualizer=None)
        test.addMinimize('out', output.next(), rel1)
        test.neuralizeModel(0.01)

        data_struct = ['x', 'F', 'x2', 'y2', '', 'A1x', 'A1y', 'B1x', 'B1y', '', 'A2x', 'A2y', 'B2x', 'out', '', 'x3',
                       'in1', 'in2', 'time']
        test.loadData(name='dataset', source=data_folder, format=data_struct, skiplines=4, delimiter='\t', header=None)
        self.assertEqual((10, 5, 1), test.data['dataset']['in1'].shape)

        training_params = {}
        training_params['train_batch_size'] = 2
        training_params['val_batch_size'] = 2
        training_params['test_batch_size'] = 2
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[40, 30, 30], training_params=training_params)

        # 15 lines in the dataset
        # 5 lines for input + 1 for output -> total of sample 10
        # batch_size > 5 -> NO
        # num_of_training_sample must be multiple of batch_size
        # num_of_test_sample must be multiple of batch_size and at least 50%
        # 10 * 0.4 = 4 for training
        # 10 * 0.3 = 3 for validation
        # 10 * 0.3 = 3 for test
        self.assertEqual((15 - 5), test.num_of_samples['dataset'])
        self.assertEqual(round((15 - 5) * 40 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual(round((15 - 5) * 30 / 100), test.run_training_params['n_samples_val'])
        self.assertEqual(round((15 - 5) * 30 / 100), test.run_training_params['n_samples_test'])
        self.assertEqual(2, test.run_training_params['train_batch_size'])
        self.assertEqual(2, test.run_training_params['val_batch_size'])
        self.assertEqual(2, test.run_training_params['test_batch_size'])
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
394

395
    def test_build_dataset_batch4(self):
        """batch_size=2 with splits=[80,10,10]: the 1-sample val/test
        partitions force their batch sizes down to 1 (see assertions)."""
        input1 = Input('in1')
        output = Input('out')
        rel1 = Fir(input1.tw(0.05))

        test = Modely(visualizer=None)
        test.addMinimize('out', output.z(-1), rel1)
        test.neuralizeModel(0.01)

        data_struct = ['x', 'F', 'x2', 'y2', '', 'A1x', 'A1y', 'B1x', 'B1y', '', 'A2x', 'A2y', 'B2x', 'out', '', 'x3',
                       'in1', 'in2', 'time']
        test.loadData(name='dataset', source=data_folder, format=data_struct, skiplines=4, delimiter='\t', header=None)
        self.assertEqual((10, 5, 1), test.data['dataset']['in1'].shape)

        training_params = {}
        training_params['train_batch_size'] = 2
        training_params['val_batch_size'] = 2
        training_params['test_batch_size'] = 2
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[80, 10, 10], training_params=training_params)

        # 15 lines in the dataset
        # 5 lines for input + 1 for output -> total of sample 10
        # batch_size > 1 -> YES
        # num_of_training_sample must be multiple of batch_size
        # num_of_test_sample must be multiple of batch_size and at least 10%
        # 10 * 0.8 = 8 for training
        # 10 * 0.1 = 1 for validation
        # 10 * 0.1 = 1 for test
        self.assertEqual((15 - 5), test.num_of_samples['dataset'])
        self.assertEqual(round((15 - 5) * 80 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual(round((15 - 5) * 10 / 100), test.run_training_params['n_samples_val'])
        self.assertEqual(round((15 - 5) * 10 / 100), test.run_training_params['n_samples_test'])
        self.assertEqual(2, test.run_training_params['train_batch_size'])
        self.assertEqual(1, test.run_training_params['val_batch_size'])
        self.assertEqual(1, test.run_training_params['test_batch_size'])
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
440

441
    def test_build_dataset_from_code(self):
        """Dataset built in memory from numpy arrays (no data files):
        verify sample counts, batch sizes and batch bookkeeping."""
        input1 = Input('in1')
        output = Input('out')
        rel1 = Fir(input1.tw(0.05))

        test = Modely(visualizer=None)
        test.addMinimize('out', output.next(), rel1)
        test.neuralizeModel(0.01)

        # Synthetic affine data: out = 2*in1 - 3.
        x_size = 20
        data_x = np.random.rand(x_size) * 20 - 10
        data_a = 2
        data_b = -3
        dataset = {'in1': data_x, 'out': data_x * data_a + data_b}

        test.loadData(name='dataset', source=dataset, skiplines=0)
        self.assertEqual((15, 5, 1),
                         test.data['dataset']['in1'].shape)  ## 20 data - 5 tw = 15 sample | 0.05/0.01 = 5 in1

        training_params = {}
        training_params['train_batch_size'] = 2
        training_params['val_batch_size'] = 2
        training_params['test_batch_size'] = 2
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(splits=[80, 20, 0], training_params=training_params)

        # 20 lines in the dataset
        # 5 lines for input + 1 for output -> total of samples (20 - 5) = 15
        # batch_size > 1 -> YES
        # num_of_training_sample must be multiple of batch_size
        # num_of_test_sample must be multiple of batch_size and at least 10%
        # 15 * 0.8 = 12 for training
        # 15 * 0.2 = 3 for validation
        # 15 * 0.0 = 0 for test
        self.assertEqual((20 - 5), test.num_of_samples['dataset'])
        self.assertEqual(round((20 - 5) * 80 / 100), test.run_training_params['n_samples_train'])
        self.assertEqual(round((20 - 5) * 20 / 100), test.run_training_params['n_samples_val'])
        self.assertEqual(round((20 - 5) * 0 / 100), test.run_training_params['n_samples_test'])
        self.assertEqual(2, test.run_training_params['train_batch_size'])
        self.assertEqual(2, test.run_training_params['val_batch_size'])
        self.assertEqual(0, test.run_training_params['test_batch_size'])
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, (n_samples - batch_size + 1), batch_size)
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
491

492
    def test_network_multi_dataset(self):
        """Separate train/validation/test datasets loaded from three folders
        and passed explicitly to trainModel (no splits): each partition keeps
        its own sample count and the requested batch size."""
        train_folder = os.path.join(os.path.dirname(__file__), 'data/')
        val_folder = os.path.join(os.path.dirname(__file__), 'val_data/')
        test_folder = os.path.join(os.path.dirname(__file__), 'test_data/')

        x = Input('x')  # Position
        F = Input('F')  # Force

        # List the output of the model
        x_z = Output('x_z', Fir(x.tw(0.3)) + Fir(F.last()))

        # Add the neural model to the nnodely structure and neuralization of the model
        test = Modely(visualizer=None)
        test.addModel('x_z', x_z)
        test.addMinimize('next-pos', x.z(-1), x_z, 'mse')

        # Create the neural network
        test.neuralizeModel(sample_time=0.05)  # The sampling time depends on the dataset

        # Data load
        data_struct = ['x', 'F', 'x2', 'y2', '', 'A1x', 'A1y', 'B1x', 'B1y', '', 'A2x', 'A2y', 'B2x', 'out', '', 'x3',
                       'in1', 'in2', 'time']
        test.loadData(name='train_dataset', source=train_folder, format=data_struct, skiplines=4, delimiter='\t',
                      header=None)
        test.loadData(name='validation_dataset', source=val_folder, format=data_struct, skiplines=4, delimiter='\t',
                      header=None)
        test.loadData(name='test_dataset', source=test_folder, format=data_struct, skiplines=4, delimiter='\t',
                      header=None)

        training_params = {}
        training_params['train_batch_size'] = 3
        training_params['val_batch_size'] = 2
        training_params['test_batch_size'] = 1
        training_params['lr'] = 0.1
        training_params['num_of_epochs'] = 5
        test.trainModel(train_dataset='train_dataset', validation_dataset='validation_dataset',
                        test_dataset='test_dataset', training_params=training_params)

        self.assertEqual(9, test.num_of_samples['train_dataset'])
        self.assertEqual(5, test.num_of_samples['validation_dataset'])
        self.assertEqual(7, test.num_of_samples['test_dataset'])
        self.assertEqual(9, test.run_training_params['n_samples_train'])
        self.assertEqual(5, test.run_training_params['n_samples_val'])
        self.assertEqual(7, test.run_training_params['n_samples_test'])
        self.assertEqual(3, test.run_training_params['train_batch_size'])
        self.assertEqual(2, test.run_training_params['val_batch_size'])
        self.assertEqual(1, test.run_training_params['test_batch_size'])
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])

        # Re-derive the batch-index bookkeeping and compare with the recorded values.
        n_samples = test.run_training_params['n_samples_train']
        batch_size = test.run_training_params['train_batch_size']
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
547

548
    def test_train_vector_input(self):
        """Verify sample counts, batch sizes and batch bookkeeping when training
        a model that mixes vector inputs with a scalar input."""
        # Vector inputs of different dimensions plus one scalar input.
        x = Input('x', dimensions=4)
        y = Input('y', dimensions=3)
        k = Input('k', dimensions=2)
        w = Input('w')

        out = Output('out', Fir(Linear(Linear(3)(x.tw(0.02)) + y.tw(0.02))))
        out2 = Output('out2', Fir(Linear(k.last() + Fir(2)(w.tw(0.05, offset=-0.02)))))

        test = Modely(visualizer=None)
        test.addMinimize('out', out, out2)
        test.neuralizeModel(0.01)

        data_folder = os.path.join(os.path.dirname(__file__), 'vector_data/')
        data_struct = ['x', 'y', '', '', '', '', 'k', '', '', '', 'w']
        test.loadData(name='dataset', source=data_folder, format=data_struct, skiplines=1, delimiter='\t', header=None)

        def check_batch_accounting():
            # Updates per epoch must equal the number of full training batches;
            # samples that do not fill a whole batch are reported as unused.
            n_train = test.run_training_params['n_samples_train']
            bsize = test.run_training_params['train_batch_size']
            starts = range(0, n_train - bsize + 1, bsize)
            self.assertEqual(len(starts), test.run_training_params['update_per_epochs'])
            self.assertEqual(n_train - starts[-1] - bsize, test.run_training_params['unused_samples'])

        # First run: batch size 1 everywhere, explicit lr and epoch count.
        params = {
            'train_batch_size': 1,
            'val_batch_size': 1,
            'test_batch_size': 1,
            'lr': 0.01,
            'num_of_epochs': 7,
        }
        test.trainModel(splits=[80, 10, 10], training_params=params)

        self.assertEqual(22, test.num_of_samples['dataset'])
        for key, expected in [('n_samples_train', 18), ('n_samples_val', 2),
                              ('n_samples_test', 2), ('train_batch_size', 1),
                              ('val_batch_size', 1), ('test_batch_size', 1),
                              ('num_of_epochs', 7)]:
            self.assertEqual(expected, test.run_training_params[key])
        self.assertEqual(0.01, test.run_training_params['optimizer_defaults']['lr'])

        check_batch_accounting()

        # Second run: larger batches; lr and epoch count fall back to the
        # framework defaults (0.001 and 100).
        params = {
            'train_batch_size': 6,
            'val_batch_size': 2,
            'test_batch_size': 2,
        }
        test.trainModel(splits=[80, 10, 10], training_params=params)

        self.assertEqual(22, test.num_of_samples['dataset'])
        for key, expected in [('n_samples_train', 18), ('n_samples_val', 2),
                              ('n_samples_test', 2), ('train_batch_size', 6),
                              ('val_batch_size', 2), ('test_batch_size', 2),
                              ('num_of_epochs', 100)]:
            self.assertEqual(expected, test.run_training_params[key])
        self.assertEqual(0.001, test.run_training_params['optimizer_defaults']['lr'])

        check_batch_accounting()

611
    def test_optimizer_configuration(self):
1✔
612
        ## Model1
613
        input1 = Input('in1')
1✔
614
        a = Parameter('a', dimensions=1, tw=0.05, values=[[1], [1], [1], [1], [1]])
1✔
615
        shared_w = Parameter('w', values=[[5]])
1✔
616
        output1 = Output('out1',
1✔
617
                         Fir(parameter=a)(input1.tw(0.05)) + ParamFun(funIn, parameters={'w': shared_w})(
618
                             input1.last()))
619

620
        test = Modely(visualizer=None, seed=42)
1✔
621
        test.addModel('model1', output1)
1✔
622
        test.addMinimize('error1', input1.last(), output1)
1✔
623

624
        ## Model2
625
        input2 = Input('in2')
1✔
626
        b = Parameter('b', dimensions=1, tw=0.05, values=[[1], [1], [1], [1], [1]])
1✔
627
        output2 = Output('out2',
1✔
628
                         Fir(parameter=b)(input2.tw(0.05)) + ParamFun(funOut, parameters={'w': shared_w})(
629
                             input2.last()))
630

631
        test.addModel('model2', output2)
1✔
632
        test.addMinimize('error2', input2.last(), output2)
1✔
633
        test.neuralizeModel(0.01)
1✔
634

635
        # Dataset for train
636
        data_in1 = np.linspace(0, 5, 60)
1✔
637
        data_in2 = np.linspace(10, 15, 60)
1✔
638
        data_out1 = 2
1✔
639
        data_out2 = -3
1✔
640
        dataset = {'in1': data_in1, 'in2': data_in2, 'out1': data_in1 * data_out1, 'out2': data_in2 * data_out2}
1✔
641
        test.loadData(name='dataset1', source=dataset)
1✔
642

643
        data_in1 = np.linspace(0, 5, 100)
1✔
644
        data_in2 = np.linspace(10, 15, 100)
1✔
645
        data_out1 = 2
1✔
646
        data_out2 = -3
1✔
647
        dataset = {'in1': data_in1, 'in2': data_in2, 'out1': data_in1 * data_out1, 'out2': data_in2 * data_out2}
1✔
648
        test.loadData(name='dataset2', source=dataset)
1✔
649

650
        # Optimizer
651
        # Basic usage
652
        # Standard optimizer with standard configuration
653
        # We train all the models with split [70,20,10], lr =0.01 and epochs = 100
654
        # TODO if more than one dataset is loaded I use all the dataset
655
        test.trainModel()
1✔
656
        self.assertEqual(['model1', 'model2'], test.run_training_params['models'])
1✔
657
        self.assertEqual(39, test.run_training_params['n_samples_train'])
1✔
658
        self.assertEqual(11, test.run_training_params['n_samples_val'])
1✔
659
        self.assertEqual(6, test.run_training_params['n_samples_test'])
1✔
660
        self.assertEqual(100, test.run_training_params['num_of_epochs'])
1✔
661
        self.assertEqual(0.001, test.run_training_params['optimizer_defaults']['lr'])
1✔
662

663
        n_samples = test.run_training_params['n_samples_train']
1✔
664
        batch_size = test.run_training_params['train_batch_size']
1✔
665
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
666
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
667
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
668

669
        # We train only model1 with split [100,0,0]
670
        # TODO Learning rate automoatically optimized based on the mean and variance of the output
671
        # TODO num_of_epochs automatically defined
672
        # now is 0.001 for learning rate and 100 for the epochs and optimizer Adam
673
        test.trainModel(models='model1', splits=[100, 0, 0])
1✔
674
        self.assertEqual('model1', test.run_training_params['models'])
1✔
675
        self.assertEqual(100, test.run_training_params['num_of_epochs'])
1✔
676
        self.assertEqual(56, test.run_training_params['n_samples_train'])
1✔
677
        self.assertEqual(0, test.run_training_params['n_samples_val'])
1✔
678
        self.assertEqual(0, test.run_training_params['n_samples_test'])
1✔
679

680
        n_samples = test.run_training_params['n_samples_train']
1✔
681
        batch_size = test.run_training_params['train_batch_size']
1✔
682
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
683
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
684
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
685

686
        # Set number of epoch and learning rate via parameters it works only for standard parameters
687
        test.trainModel(models='model1', splits=[100, 0, 0], lr=0.5, num_of_epochs=5)
1✔
688
        self.assertEqual('model1', test.run_training_params['models'])
1✔
689
        self.assertEqual(5, test.run_training_params['num_of_epochs'])
1✔
690
        self.assertEqual(56, test.run_training_params['n_samples_train'])
1✔
691
        self.assertEqual(0, test.run_training_params['n_samples_val'])
1✔
692
        self.assertEqual(0, test.run_training_params['n_samples_test'])
1✔
693
        self.assertEqual(0.5, test.run_training_params['optimizer_defaults']['lr'])
1✔
694

695
        n_samples = test.run_training_params['n_samples_train']
1✔
696
        batch_size = test.run_training_params['train_batch_size']
1✔
697
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
698
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
699
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
700

701
        # Set number of epoch and learning rate via parameters it works only for standard parameters and use two different dataset one for train and one for validation
702
        test.trainModel(models='model1', train_dataset='dataset1', validation_dataset='dataset2', lr=0.6,
1✔
703
                        num_of_epochs=10)
704
        self.assertEqual('model1', test.run_training_params['models'])
1✔
705
        self.assertEqual(10, test.run_training_params['num_of_epochs'])
1✔
706
        self.assertEqual(56, test.run_training_params['n_samples_train'])
1✔
707
        self.assertEqual(96, test.run_training_params['n_samples_val'])
1✔
708
        self.assertEqual(0, test.run_training_params['n_samples_test'])
1✔
709
        self.assertEqual(0.6, test.run_training_params['optimizer_defaults']['lr'])
1✔
710

711
        n_samples = test.run_training_params['n_samples_train']
1✔
712
        batch_size = test.run_training_params['train_batch_size']
1✔
713
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
714
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
715
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
716

717
        # Use dictionary for set number of epoch, learning rate, etc.. This configuration works only standard parameters (all the parameters that are input of the trainModel).
718
        training_params = {
1✔
719
            'models': ['model2'],
720
            'splits': [55, 40, 5],
721
            'num_of_epochs': 20,
722
            'lr': 0.7
723
        }
724
        test.trainModel(training_params=training_params)
1✔
725
        self.assertEqual(['model2'], test.run_training_params['models'])
1✔
726
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
727
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
728
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
729
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
730
        self.assertEqual(0.7, test.run_training_params['optimizer_defaults']['lr'])
1✔
731

732
        n_samples = test.run_training_params['n_samples_train']
1✔
733
        batch_size = test.run_training_params['train_batch_size']
1✔
734
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
735
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
736
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
737

738
        # If I add a function parameter it has the priority
739
        # In this case apply train parameter but on a different model
740
        test.trainModel(models='model1', training_params=training_params)
1✔
741
        self.assertEqual('model1', test.run_training_params['models'])
1✔
742
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
743
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
744
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
745
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
746
        self.assertEqual(0.7, test.run_training_params['optimizer_defaults']['lr'])
1✔
747

748
        n_samples = test.run_training_params['n_samples_train']
1✔
749
        batch_size = test.run_training_params['train_batch_size']
1✔
750
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
751
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
752
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
753

754
        ##################################
755
        # Modify additional parameters in the optimizer that are not present in the standard parameter
756
        # In this case I modify the learning rate and the betas of the Adam optimizer
757
        # For the optimizer parameter the priority is the following
758
        # max priority to the function parameter ('lr' : 0.2)
759
        # then the standard_optimizer_parameters ('lr' : 0.1)
760
        # finally the standard_train_parameters  ('lr' : 0.5)
761
        optimizer_defaults = {
1✔
762
            'lr': 0.1,
763
            'betas': (0.5, 0.99)
764
        }
765
        test.trainModel(training_params=training_params, optimizer_defaults=optimizer_defaults, lr=0.2)
1✔
766
        self.assertEqual(['model2'], test.run_training_params['models'])
1✔
767
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
768
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
769
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
770
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
771
        self.assertEqual(0.2, test.run_training_params['optimizer_defaults']['lr'])
1✔
772
        self.assertEqual((0.5, 0.99), test.run_training_params['optimizer_defaults']['betas'])
1✔
773

774
        n_samples = test.run_training_params['n_samples_train']
1✔
775
        batch_size = test.run_training_params['train_batch_size']
1✔
776
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
777
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
778
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
779

780
        test.trainModel(training_params=training_params, optimizer_defaults=optimizer_defaults)
1✔
781
        self.assertEqual(['model2'], test.run_training_params['models'])
1✔
782
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
783
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
784
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
785
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
786
        self.assertEqual(0.1, test.run_training_params['optimizer_defaults']['lr'])
1✔
787
        self.assertEqual((0.5, 0.99), test.run_training_params['optimizer_defaults']['betas'])
1✔
788

789
        n_samples = test.run_training_params['n_samples_train']
1✔
790
        batch_size = test.run_training_params['train_batch_size']
1✔
791
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
792
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
793
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
794

795
        test.trainModel(training_params=training_params)
1✔
796
        self.assertEqual(['model2'], test.run_training_params['models'])
1✔
797
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
798
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
799
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
800
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
801
        self.assertEqual(0.7, test.run_training_params['optimizer_defaults']['lr'])
1✔
802

803
        n_samples = test.run_training_params['n_samples_train']
1✔
804
        batch_size = test.run_training_params['train_batch_size']
1✔
805
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
806
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
807
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
808
        ##################################
809

810
        # Modify the non standard args of the optimizer using the optimizer_defaults
811
        # In this case use the SGD with 0.2 of momentum
812
        optimizer_defaults = {
1✔
813
            'momentum': 0.002
814
        }
815
        test.trainModel(optimizer='SGD', training_params=training_params, optimizer_defaults=optimizer_defaults, lr=0.2)
1✔
816
        self.assertEqual(['model2'], test.run_training_params['models'])
1✔
817
        self.assertEqual('SGD', test.run_training_params['optimizer'])
1✔
818
        self.assertEqual(20, test.run_training_params['num_of_epochs'])
1✔
819
        self.assertEqual(round(56 * 55 / 100), test.run_training_params['n_samples_train'])
1✔
820
        self.assertEqual(round(56 * 40 / 100), test.run_training_params['n_samples_val'])
1✔
821
        self.assertEqual(round(56 * 5 / 100), test.run_training_params['n_samples_test'])
1✔
822
        self.assertEqual(0.2, test.run_training_params['optimizer_defaults']['lr'])
1✔
823
        self.assertEqual(0.002, test.run_training_params['optimizer_defaults']['momentum'])
1✔
824

825
        n_samples = test.run_training_params['n_samples_train']
1✔
826
        batch_size = test.run_training_params['train_batch_size']
1✔
827
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
828
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
829
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
830

831
        # Modify standard optimizer parameter for each training parameter
832
        training_params = {
1✔
833
            'models': ['model1'],
834
            'splits': [100, 0, 0],
835
            'num_of_epochs': 30,
836
            'lr': 0.5,
837
            'lr_param': {'a': 0.1}
838
        }
839
        test.trainModel(training_params=training_params)
1✔
840
        self.assertEqual(['model1'], test.run_training_params['models'])
1✔
841
        self.assertEqual(30, test.run_training_params['num_of_epochs'])
1✔
842
        self.assertEqual(round(56 * 100 / 100), test.run_training_params['n_samples_train'])
1✔
843
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_val'])
1✔
844
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_test'])
1✔
845
        self.assertEqual(0.5, test.run_training_params['optimizer_defaults']['lr'])
1✔
846
        self.assertEqual([{'lr': 0.1, 'params': 'a'},
1✔
847
                          {'lr': 0.0, 'params': 'b'},
848
                          {'params': 'w'}], test.run_training_params['optimizer_params'])
849

850
        n_samples = test.run_training_params['n_samples_train']
1✔
851
        batch_size = test.run_training_params['train_batch_size']
1✔
852
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
853
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
854
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
855

856
        ##################################
857
        # Modify standard optimizer parameter for each training parameter using optimizer_params
858
        # The priority is the following
859
        # max priority to the function parameter ( 'lr_param'={'a': 0.4})
860
        # then the optimizer_params ( {'params':'a','lr':0.6} )
861
        # then the optimizer_params inside the train_parameters ( {'params':['a'],'lr':0.7} )
862
        # finally the train_parameters  ( 'lr_param'={'a': 0.1})
863
        training_params = {
1✔
864
            'models': ['model1'],
865
            'splits': [100, 0, 0],
866
            'num_of_epochs': 40,
867
            'lr': 0.5,
868
            'lr_param': {'a': 0.1},
869
            'optimizer_params': [{'params': ['a'], 'lr': 0.7}],
870
            'optimizer_defaults': {'lr': 0.12}
871
        }
872
        optimizer_params = [
1✔
873
            {'params': ['a'], 'lr': 0.6}
874
        ]
875
        optimizer_defaults = {
1✔
876
            'lr': 0.2
877
        }
878
        test.trainModel(training_params=training_params, optimizer_params=optimizer_params,
1✔
879
                        optimizer_defaults=optimizer_defaults, lr_param={'a': 0.4})
880
        self.assertEqual(['model1'], test.run_training_params['models'])
1✔
881
        self.assertEqual(40, test.run_training_params['num_of_epochs'])
1✔
882
        self.assertEqual(round(56 * 100 / 100), test.run_training_params['n_samples_train'])
1✔
883
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_val'])
1✔
884
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_test'])
1✔
885
        self.assertEqual(0.2, test.run_training_params['optimizer_defaults']['lr'])
1✔
886
        self.assertEqual([{'lr': 0.4, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
887

888
        n_samples = test.run_training_params['n_samples_train']
1✔
889
        batch_size = test.run_training_params['train_batch_size']
1✔
890
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
891
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
892
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
893

894
        test.trainModel(training_params=training_params, optimizer_params=optimizer_params,
1✔
895
                        optimizer_defaults=optimizer_defaults)
896
        self.assertEqual(0.2, test.run_training_params['optimizer_defaults']['lr'])
1✔
897
        self.assertEqual([{'lr': 0.6, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
898

899
        test.trainModel(training_params=training_params, optimizer_params=optimizer_params)
1✔
900
        self.assertEqual(0.12, test.run_training_params['optimizer_defaults']['lr'])
1✔
901
        self.assertEqual([{'lr': 0.6, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
902

903
        test.trainModel(training_params=training_params)
1✔
904
        self.assertEqual(0.12, test.run_training_params['optimizer_defaults']['lr'])
1✔
905
        self.assertEqual([{'params': 'a', 'lr': 0.7}], test.run_training_params['optimizer_params'])
1✔
906

907
        del training_params['optimizer_defaults']
1✔
908
        test.trainModel(training_params=training_params)
1✔
909
        self.assertEqual(0.5, test.run_training_params['optimizer_defaults']['lr'])
1✔
910
        self.assertEqual([{'params': 'a', 'lr': 0.7}], test.run_training_params['optimizer_params'])
1✔
911

912
        del training_params['optimizer_params']
1✔
913
        test.trainModel(training_params=training_params)
1✔
914
        self.assertEqual(0.5, test.run_training_params['optimizer_defaults']['lr'])
1✔
915
        self.assertEqual([{'lr': 0.1, 'params': 'a'},
1✔
916
                          {'lr': 0.0, 'params': 'b'},
917
                          {'params': 'w'}], test.run_training_params['optimizer_params'])
918

919
        test.trainModel()
1✔
920
        self.assertEqual(0.001, test.run_training_params['optimizer_defaults']['lr'])
1✔
921
        self.assertEqual([{'params': 'a'},
1✔
922
                          {'params': 'b'},
923
                          {'params': 'w'}], test.run_training_params['optimizer_params'])
924

925
        n_samples = test.run_training_params['n_samples_train']
1✔
926
        batch_size = test.run_training_params['train_batch_size']
1✔
927
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
928
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
929
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
930

931
        ##################################
932

933
        ##################################
934
        # Maximum level of configuration I define a custom optimizer with defaults
935
        # For the optimizer default the priority is the following
936
        # max priority to the function parameter ('lr'= 0.4)
937
        # then the optimizer_defaults ('lr':0.1)
938
        # then the optimizer_defaults inside the train_parameters ('lr'= 0.12)
939
        # finally the train_parameters  ('lr'= 0.5)
940
        class RMSprop(Optimizer):
1✔
941
            def __init__(self, optimizer_defaults={}, optimizer_params=[]):
1✔
942
                super(RMSprop, self).__init__('RMSprop', optimizer_defaults, optimizer_params)
1✔
943

944
            def get_torch_optimizer(self):
1✔
945
                import torch
1✔
946
                return torch.optim.RMSprop(self.replace_key_with_params(), **self.optimizer_defaults)
1✔
947

948
        training_params = {
1✔
949
            'models': ['model1'],
950
            'splits': [100, 0, 0],
951
            'num_of_epochs': 40,
952
            'lr': 0.5,
953
            'lr_param': {'a': 0.1},
954
            'optimizer_params': [{'params': ['a'], 'lr': 0.7}],
955
            'optimizer_defaults': {'lr': 0.12}
956
        }
957
        optimizer_defaults = {
1✔
958
            'alpha': 0.8
959
        }
960
        optimizer = RMSprop(optimizer_defaults)
1✔
961
        test.trainModel(optimizer=optimizer, training_params=training_params, optimizer_defaults={'lr': 0.3}, lr=0.4)
1✔
962
        self.assertEqual(['model1'], test.run_training_params['models'])
1✔
963
        self.assertEqual('RMSprop', test.run_training_params['optimizer'])
1✔
964
        self.assertEqual(40, test.run_training_params['num_of_epochs'])
1✔
965
        self.assertEqual(round(56 * 100 / 100), test.run_training_params['n_samples_train'])
1✔
966
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_val'])
1✔
967
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_test'])
1✔
968
        self.assertEqual({'lr': 0.4}, test.run_training_params['optimizer_defaults'])
1✔
969
        self.assertEqual([{'lr': 0.7, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
970

971
        test.trainModel(optimizer=optimizer, training_params=training_params, optimizer_defaults={'lr': 0.1})
1✔
972
        self.assertEqual({'lr': 0.1}, test.run_training_params['optimizer_defaults'])
1✔
973
        self.assertEqual([{'lr': 0.7, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
974

975
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
976
        self.assertEqual({'lr': 0.12}, test.run_training_params['optimizer_defaults'])
1✔
977
        self.assertEqual([{'lr': 0.7, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
978

979
        del training_params['optimizer_defaults']
1✔
980
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
981
        self.assertEqual({'alpha': 0.8, 'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
982
        self.assertEqual([{'lr': 0.7, 'params': 'a'}], test.run_training_params['optimizer_params'])
1✔
983

984
        del training_params['optimizer_params']
1✔
985
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
986
        self.assertEqual({'alpha': 0.8, 'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
987
        self.assertEqual([{'lr': 0.1, 'params': 'a'}, {'lr': 0.0, 'params': 'b'}, {'params': 'w'}],
1✔
988
                         test.run_training_params['optimizer_params'])
989

990
        test.trainModel(optimizer=optimizer)
1✔
991
        self.assertEqual({'alpha': 0.8, 'lr': 0.001}, test.run_training_params['optimizer_defaults'])
1✔
992
        self.assertEqual([{'params': 'a'}, {'params': 'b'}, {'params': 'w'}],
1✔
993
                         test.run_training_params['optimizer_params'])
994

995
        n_samples = test.run_training_params['n_samples_train']
1✔
996
        batch_size = test.run_training_params['train_batch_size']
1✔
997
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
998
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
999
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
1000
        ##################################
1001

1002
        ##################################
1003
        # Maximum level of configuration I define a custom optimizer with custom value for each params
1004
        # The priority is the following
1005
        # max priority to the function parameter ( 'lr_param'={'a': 0.2})
1006
        # then the optimizer_params ( [{'params':['a'],'lr':1.0}] )
1007
        # then the optimizer_params inside the train_parameters (  [{'params':['a'],'lr':0.7}] )
1008
        # then the train_parameters  ( 'lr_param'={'a': 0.1} )
1009
        # finnaly the optimizer_paramsat the time of the optimizer initialization [{'params':['a'],'lr':0.6}]
1010
        training_params = {
1✔
1011
            'models': ['model1'],
1012
            'splits': [100, 0, 0],
1013
            'num_of_epochs': 40,
1014
            'lr': 0.5,
1015
            'lr_param': {'a': 0.1},
1016
            'optimizer_params': [{'params': ['a'], 'lr': 0.7}],
1017
            'optimizer_defaults': {'lr': 0.12}
1018
        }
1019
        optimizer_defaults = {
1✔
1020
            'alpha': 0.8
1021
        }
1022
        optimizer_params = [
1✔
1023
            {'params': ['a'], 'lr': 0.6}, {'params': 'w', 'lr': 0.12, 'alpha': 0.02}
1024
        ]
1025
        optimizer = RMSprop(optimizer_defaults, optimizer_params)
1✔
1026
        test.trainModel(optimizer=optimizer, training_params=training_params, optimizer_defaults={'lr': 0.3},
1✔
1027
                        optimizer_params=[{'params': ['a'], 'lr': 1.0}, {'params': ['b'], 'lr': 1.2}],
1028
                        lr_param={'a': 0.2})
1029
        self.assertEqual(['model1'], test.run_training_params['models'])
1✔
1030
        self.assertEqual('RMSprop', test.run_training_params['optimizer'])
1✔
1031
        self.assertEqual(40, test.run_training_params['num_of_epochs'])
1✔
1032
        self.assertEqual(round(56 * 100 / 100), test.run_training_params['n_samples_train'])
1✔
1033
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_val'])
1✔
1034
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_test'])
1✔
1035
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1036
        self.assertEqual([{'params': 'a', 'lr': 0.2}, {'params': 'b', 'lr': 1.2}],
1✔
1037
                         test.run_training_params['optimizer_params'])
1038

1039
        test.trainModel(optimizer=optimizer, training_params=training_params, optimizer_defaults={'lr': 0.3},
1✔
1040
                        optimizer_params=[{'params': ['a'], 'lr': 0.1}, {'params': ['b'], 'lr': 0.2}])
1041
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1042
        self.assertEqual([{'params': 'a', 'lr': 0.1}, {'params': 'b', 'lr': 0.2}],
1✔
1043
                         test.run_training_params['optimizer_params'])
1044

1045
        test.trainModel(optimizer=optimizer, training_params=training_params, optimizer_defaults={'lr': 0.3})
1✔
1046
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1047
        self.assertEqual([{'params': 'a', 'lr': 0.7}], test.run_training_params['optimizer_params'])
1✔
1048

1049
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1050
        self.assertEqual({'lr': 0.12}, test.run_training_params['optimizer_defaults'])
1✔
1051
        self.assertEqual([{'params': 'a', 'lr': 0.7}], test.run_training_params['optimizer_params'])
1✔
1052

1053
        del training_params['optimizer_defaults']
1✔
1054
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1055
        self.assertEqual({'alpha': 0.8, 'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
1056
        self.assertEqual([{'params': 'a', 'lr': 0.7}], test.run_training_params['optimizer_params'])
1✔
1057

1058
        del training_params['optimizer_params']
1✔
1059
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1060
        self.assertEqual({'alpha': 0.8, 'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
1061
        self.assertEqual([{'lr': 0.1, 'params': 'a'}, {'alpha': 0.02, 'lr': 0.12, 'params': 'w'}],
1✔
1062
                         test.run_training_params['optimizer_params'])
1063

1064
        test.trainModel(optimizer=optimizer)
1✔
1065
        self.assertEqual({'alpha': 0.8, 'lr': 0.001}, test.run_training_params['optimizer_defaults'])
1✔
1066
        self.assertEqual([{'params': 'a', 'lr': 0.6}, {'params': 'w', 'lr': 0.12, 'alpha': 0.02}],
1✔
1067
                         test.run_training_params['optimizer_params'])
1068

1069
        n_samples = test.run_training_params['n_samples_train']
1✔
1070
        batch_size = test.run_training_params['train_batch_size']
1✔
1071
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
1072
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
1073
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
1074
        ##################################
1075

1076
        ##################################
1077
        # Maximum level of configuration I define a custom optimizer and add some parameter over the defaults
1078
        # The priority is the following
1079
        # max priority to the function parameter ( 'lr_param'={'a': 0.2})
1080
        # then the optimizer_params ( [{'params':['a'],'lr':1.0}] )
1081
        # then the optimizer_params inside the train_parameters (  [{'params':['a'],'lr':0.7}] )
1082
        # then the train_parameters  ( 'lr_param'={'a': 0.1} )
1083
        # The other parameters are the defaults
1084
        training_params = {
1✔
1085
            'models': ['model1'],
1086
            'splits': [100, 0, 0],
1087
            'num_of_epochs': 40,
1088
            'lr': 0.5,
1089
            'lr_param': {'a': 0.1},
1090
            'add_optimizer_params': [{'params': ['a'], 'lr': 0.7}],
1091
            'add_optimizer_defaults': {'lr': 0.12}
1092
        }
1093
        optimizer = RMSprop()
1✔
1094
        test.trainModel(optimizer=optimizer, training_params=training_params, add_optimizer_defaults={'lr': 0.3},
1✔
1095
                        add_optimizer_params=[{'params': ['a'], 'lr': 1.0}, {'params': ['b'], 'lr': 1.2}],
1096
                        lr_param={'a': 0.2})
1097
        self.assertEqual(['model1'], test.run_training_params['models'])
1✔
1098
        self.assertEqual('RMSprop', test.run_training_params['optimizer'])
1✔
1099
        self.assertEqual(40, test.run_training_params['num_of_epochs'])
1✔
1100
        self.assertEqual(round(56 * 100 / 100), test.run_training_params['n_samples_train'])
1✔
1101
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_val'])
1✔
1102
        self.assertEqual(round(56 * 0 / 100), test.run_training_params['n_samples_test'])
1✔
1103
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1104
        self.assertEqual([{'params': 'a', 'lr': 0.2}, {'params': 'b', 'lr': 1.2}, {'params': 'w'}],
1✔
1105
                         test.run_training_params['optimizer_params'])
1106

1107
        test.trainModel(optimizer=optimizer, training_params=training_params, add_optimizer_defaults={'lr': 0.3},
1✔
1108
                        add_optimizer_params=[{'params': ['a'], 'lr': 0.1}, {'params': ['b'], 'lr': 0.2}])
1109
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1110
        self.assertEqual([{'params': 'a', 'lr': 0.1}, {'params': 'b', 'lr': 0.2}, {'params': 'w'}],
1✔
1111
                         test.run_training_params['optimizer_params'])
1112

1113
        test.trainModel(optimizer=optimizer, training_params=training_params, add_optimizer_defaults={'lr': 0.3})
1✔
1114
        self.assertEqual({'lr': 0.3}, test.run_training_params['optimizer_defaults'])
1✔
1115
        self.assertEqual([{'params': 'a', 'lr': 0.7}, {'lr': 0.0, 'params': 'b'}, {'params': 'w'}],
1✔
1116
                         test.run_training_params['optimizer_params'])
1117

1118
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1119
        self.assertEqual({'lr': 0.12}, test.run_training_params['optimizer_defaults'])
1✔
1120
        self.assertEqual([{'params': 'a', 'lr': 0.7}, {'lr': 0.0, 'params': 'b'}, {'params': 'w'}],
1✔
1121
                         test.run_training_params['optimizer_params'])
1122

1123
        del training_params['add_optimizer_defaults']
1✔
1124
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1125
        self.assertEqual({'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
1126
        self.assertEqual([{'params': 'a', 'lr': 0.7}, {'lr': 0.0, 'params': 'b'}, {'params': 'w'}],
1✔
1127
                         test.run_training_params['optimizer_params'])
1128

1129
        del training_params['add_optimizer_params']
1✔
1130
        test.trainModel(optimizer=optimizer, training_params=training_params)
1✔
1131
        self.assertEqual({'lr': 0.5}, test.run_training_params['optimizer_defaults'])
1✔
1132
        self.assertEqual([{'lr': 0.1, 'params': 'a'}, {'lr': 0.0, 'params': 'b'}, {'params': 'w'}],
1✔
1133
                         test.run_training_params['optimizer_params'])
1134

1135
        test.trainModel(optimizer=optimizer)
1✔
1136
        self.assertEqual({'lr': 0.001}, test.run_training_params['optimizer_defaults'])
1✔
1137
        self.assertEqual([{'params': 'a'}, {'params': 'b'}, {'params': 'w'}],
1✔
1138
                         test.run_training_params['optimizer_params'])
1139

1140
        n_samples = test.run_training_params['n_samples_train']
1✔
1141
        batch_size = test.run_training_params['train_batch_size']
1✔
1142
        list_of_batch_indexes = range(0, n_samples - batch_size + 1, batch_size)
1✔
1143
        self.assertEqual(len(list_of_batch_indexes), test.run_training_params['update_per_epochs'])
1✔
1144
        self.assertEqual(n_samples - list_of_batch_indexes[-1] - batch_size, test.run_training_params['unused_samples'])
1✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc