WassimTenachi / PhySO / build #13 (push, via coveralls-python)

10 Jun 2024 12:28AM UTC. Coverage: 52.052% (-30.3%) from 82.385%
Commit by WassimTenachi: "Update requirements.txt"

2980 of 5725 relevant lines covered (52.05%)
0.52 hits per line

Source file: /physo/physym/tests/program_UnitTest.py (coverage: 26.56%)

import unittest

import matplotlib.pyplot as plt
import numpy as np
import time as time
import warnings
import torch
import pickle
import os

# Internal imports
from physo.physym import library as Lib
from physo.physym import program as Prog
from physo.physym import dimensional_analysis as phy
from physo.physym.functions import data_conversion, data_conversion_inv
import physo.physym.free_const as free_const
from physo.physym import vect_programs as VProg

def make_lib():
    # LIBRARY CONFIG
    args_make_tokens = {
                    # operations
                    "op_names"             : "all",  # or ["mul", "neg", "inv", "sin"]
                    "use_protected_ops"    : False,
                    # input variables
                    "input_var_ids"        : {"x" : 0         , "v" : 1          , "t" : 2,        },
                    "input_var_units"      : {"x" : [1, 0, 0] , "v" : [1, -1, 0] , "t" : [0, 1, 0] },
                    "input_var_complexity" : {"x" : 0.        , "v" : 1.         , "t" : 0.,       },
                    # constants
                    "constants"            : {"pi" : np.pi     , "c" : 3e8       , "M" : 1e6       },
                    "constants_units"      : {"pi" : [0, 0, 0] , "c" : [1, -1, 0], "M" : [0, 0, 1] },
                    "constants_complexity" : {"pi" : 0.        , "c" : 0.        , "M" : 1.        },
                    # free constants
                    "free_constants"            : {"c0"             , "c1"               , "c2"             },
                    "free_constants_init_val"   : {"c0" : 1.        , "c1"  : 10.        , "c2" : 1.        },
                    "free_constants_units"      : {"c0" : [0, 0, 0] , "c1"  : [1, -1, 0] , "c2" : [0, 0, 1] },
                    "free_constants_complexity" : {"c0" : 0.        , "c1"  : 0.         , "c2" : 1.        },
                           }

    my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                         superparent_units = [1, -2, 1], superparent_name = "y")
    return my_lib
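
# NB: in the config above, units are power vectors in [length, time, mass] order
# ("x" is a length, "t" a time, "M" a mass), so "v" : [1, -1, 0] reads as length/time
# and the superparent "y" with units [1, -2, 1] as length * time**-2 * mass.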

class ProgramTest(unittest.TestCase):

    def test_creation(self):

        k0_init = [9,10,11]*10 # np.full(n_realizations, 1.)
        n_realizations = len(k0_init)
        # consts
        pi     = data_conversion (np.pi)
        const1 = data_conversion (1.)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",
                        "use_protected_ops"    : True,
                        # input variables
                        "input_var_ids"        : {"t" : 0         , "l" : 1          },
                        "input_var_units"      : {"t" : [1, 0, 0] , "l" : [0, 1, 0]  },
                        "input_var_complexity" : {"t" : 0.        , "l" : 1.         },
                        # constants
                        "constants"            : {"pi" : pi        , "const1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "const1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 1.        , "const1" : 1.        },
                        # class free constants
                        "class_free_constants"            : {"c0"              , "c1"               },
                        "class_free_constants_init_val"   : {"c0" : 21.        , "c1"  : 22.         },
                        "class_free_constants_units"      : {"c0" : [-1, 0, 0] , "c1"  : [0, -1, 0] },
                        "class_free_constants_complexity" : {"c0" : 1.         , "c1"  : 1.         },
                        # spe free constants
                        "spe_free_constants"            : {"k0"              , "k1"               , "k2"               },
                        "spe_free_constants_init_val"   : {"k0" : k0_init    , "k1"  : 2.         , "k2"  : 3.         },
                        "spe_free_constants_units"      : {"k0" : [0, 0, 0]  , "k1"  : [-1, 0, 0] , "k2"  : [0, 0, 0]  },
                        "spe_free_constants_complexity" : {"k0" : 1.         , "k1"  : 1.         , "k2"  : 1.         },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")

        # TEST PROGRAMS
        test_programs_idx = []
        test_prog_str_0 = ["add", "mul", "mul", "k0"  , "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l", ]
        test_prog_str_1 = ["mul", "n2" , "c0" , "cos" , "div", "t"  , "c1" ,]
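        # Read in prefix (Polish) notation, these token sequences correspond to:
        #   test_prog_str_0 : k0 * exp(-k1 * t) * cos(c0 * t + k2) + c1 * l
        #   test_prog_str_1 : c0**2 * cos(t / c1)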

        # Converting into idx
        test_prog_tokens_0 = np.array([my_lib.lib_name_to_token[tok_str] for tok_str in test_prog_str_0])
        test_prog_tokens_1 = np.array([my_lib.lib_name_to_token[tok_str] for tok_str in test_prog_str_1])

        # Creating programs without free constants
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # Raises some warnings due to the free constants table not being provided
                prog0 = Prog.Program(tokens=test_prog_tokens_0, library=my_lib, n_realizations=n_realizations, free_consts=None, is_physical=None, candidate_wrapper=None)
                prog1 = Prog.Program(tokens=test_prog_tokens_1, library=my_lib, n_realizations=n_realizations, free_consts=None, is_physical=None, candidate_wrapper=None)
        except:
            self.fail("Program creation failed.")

        # Creating programs with free constants
        try:
            free_consts_table_0 = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
            free_consts_table_1 = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
            prog0 = Prog.Program(tokens=test_prog_tokens_0, library=my_lib, n_realizations=n_realizations, free_consts=free_consts_table_0, is_physical=None, candidate_wrapper=None)
            prog1 = Prog.Program(tokens=test_prog_tokens_1, library=my_lib, n_realizations=n_realizations, free_consts=free_consts_table_1, is_physical=None, candidate_wrapper=None)
        except:
            self.fail("Program creation failed.")

    def test_pickability(self):

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # -------------------------------------- Making fake datasets --------------------------------------

        multi_X = []
        for n_samples in [90, 100, 110]:
            x1 = np.linspace(0, 10, n_samples)
            x2 = np.linspace(0, 1 , n_samples)
            X = np.stack((x1,x2),axis=0)
            X = torch.tensor(X).to(DEVICE)
            multi_X.append(X)
        multi_X = multi_X*10                         # (n_realizations,) of (n_dim, [n_samples depends on dataset],)
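        # 3 base datasets replicated 10 times -> n_realizations = 30 (matching k0_init = [9,10,11]*10 below)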

        n_samples_per_dataset = np.array([X.shape[1] for X in multi_X])
        n_all_samples = n_samples_per_dataset.sum()
        n_realizations = len(multi_X)
        def flatten_multi_data (multi_data,):
            """
            Flattens multiple datasets into a single one for vectorized evaluation.
            Parameters
            ----------
            multi_data : list of length (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                List of datasets to be flattened.
            Returns
            -------
            torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            """
            flattened_data = torch.cat(multi_data, axis=-1) # (..., n_all_samples)
            return flattened_data

        def unflatten_multi_data (flattened_data):
            """
            Unflattens a single dataset into multiple ones.
            Parameters
            ----------
            flattened_data : torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            Returns
            -------
            list of len (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                Unflattened data.
            """
            return list(torch.split(flattened_data, n_samples_per_dataset.tolist(), dim=-1)) # (n_realizations,) of (..., [n_samples depends on dataset],)
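        # e.g. with per-dataset shapes (2, 90), (2, 100), (2, 110), ..., flatten_multi_data
        # returns a single (2, 3000) tensor and unflatten_multi_data splits it back into the
        # original list of per-realization tensors.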

        # y_weights_per_dataset = np.array([0, 0.001, 1.0]*10) # Shows weights work
        y_weights_per_dataset = torch.tensor(np.array([1., 1., 1.]*10))
        multi_y_weights = [torch.full(size=(n_samples_per_dataset[i],), fill_value=y_weights_per_dataset[i]) for i in range (n_realizations)]
        y_weights_flatten = flatten_multi_data(multi_y_weights)

        multi_X_flatten = flatten_multi_data(multi_X)  # (n_dim, n_all_samples)

        # Making fake ideal parameters
        # n_spe_params   = 3
        # n_class_params = 2
        random_shift       = (np.random.rand(n_realizations,3)-0.5)*0.8
        ideal_spe_params   = torch.tensor(np.array([1.123, 0.345, 0.116]) + random_shift) # (n_realizations, n_spe_params,)
        ideal_class_params = torch.tensor(np.array([1.389, 1.005]))                       # (n_class_params, )

        ideal_spe_params_flatten = torch.cat(
            [torch.tile(ideal_spe_params[i], (n_samples_per_dataset[i],1)).transpose(0,1) for i in range (n_realizations)], # (n_realizations,) of (n_spe_params, [n_samples depends on dataset],)
            axis = 1
        ) # (n_spe_params, n_all_samples)

        ideal_class_params_flatten = torch.tile(ideal_class_params, (n_all_samples,1)).transpose(0,1) # (n_class_params, n_all_samples)
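        # Each realization's spe params are tiled across that realization's own samples,
        # while the shared class params are tiled across all n_all_samples samples.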

        def trial_func (X, params, class_params):
            y = params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
            return y

        y_ideals_flatten = trial_func (multi_X_flatten, ideal_spe_params_flatten, ideal_class_params_flatten) # (n_all_samples,)
        multi_y_ideals   = unflatten_multi_data(y_ideals_flatten)                                         # (n_realizations,) of (n_samples depends on dataset,)


        # params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
        # k0 * exp(-k1 * t) * cos(c0 * t + k2) + c1 * l
        # "add", "mul", "mul", "k0", "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l"

        k0_init = [9,10,11]*10 # np.full(n_realizations, 1.)
        # consts
        pi     = data_conversion (np.pi) .to(DEVICE)
        const1 = data_conversion (1.)    .to(DEVICE)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",
                        "use_protected_ops"    : True,
                        # input variables
                        "input_var_ids"        : {"t" : 0         , "l" : 1          },
                        "input_var_units"      : {"t" : [1, 0, 0] , "l" : [0, 1, 0]  },
                        "input_var_complexity" : {"t" : 0.        , "l" : 1.         },
                        # constants
                        "constants"            : {"pi" : pi        , "const1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "const1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 1.        , "const1" : 1.        },
                        # class free constants
                        "class_free_constants"            : {"c0"              , "c1"               },
                        "class_free_constants_init_val"   : {"c0" : 21.        , "c1"  : 22.         },
                        "class_free_constants_units"      : {"c0" : [-1, 0, 0] , "c1"  : [0, -1, 0] },
                        "class_free_constants_complexity" : {"c0" : 1.         , "c1"  : 1.         },
                        # spe free constants
                        "spe_free_constants"            : {"k0"              , "k1"               , "k2"               },
                        "spe_free_constants_init_val"   : {"k0" : k0_init    , "k1"  : 2.         , "k2"  : 3.         },
                        "spe_free_constants_units"      : {"k0" : [0, 0, 0]  , "k1"  : [-1, 0, 0] , "k2"  : [0, 0, 0]  },
                        "spe_free_constants_complexity" : {"k0" : 1.         , "k1"  : 1.         , "k2"  : 1.         },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")

        # TEST PROGRAMS
        test_programs_idx = []
        test_prog_str_0 = ["add", "mul", "mul", "k0"  , "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l", ]
        test_tokens_0 = [my_lib.lib_name_to_token[name] for name in test_prog_str_0]

        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        free_const_table.class_values[0] = ideal_class_params
        free_const_table.spe_values  [0] = ideal_spe_params.transpose(0,1) # (n_spe_params, n_realizations)

        prog = Prog.Program(tokens=test_tokens_0, library=my_lib, free_consts=free_const_table, n_realizations=n_realizations)

        # Test pickability
        try:
            pickle.dumps(prog)
        except:
            self.fail("Program pickability failed.")

        # Test save
        fpath = "test_prog.pkl"
        try:
            prog.save(fpath)
            os.remove(fpath) if os.path.exists(fpath) else None
        except:
            os.remove(fpath) if os.path.exists(fpath) else None
            self.fail("Program save failed.")

        # Test pickle load
        try:
            prog.save(fpath)
            prog_loaded = Prog.load_program_pkl(fpath)
            os.remove(fpath) if os.path.exists(fpath) else None
        except:
            os.remove(fpath) if os.path.exists(fpath) else None
            self.fail("Program pickle load failed.")

        return None

    # Test program execution on a complicated function (no free consts)
    def test_execution (self):

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # DATA
        N = int(1e6)

        # input var
        x = data_conversion  (np.linspace(0.04, 4, N)  ).to(DEVICE)
        v = data_conversion  (np.linspace(0.10, 10, N) ).to(DEVICE)
        t = data_conversion  (np.linspace(0.06, 6, N)  ).to(DEVICE)
        data = torch.stack((x, v, t), axis=0)

        # consts
        pi = data_conversion (np.pi).to(DEVICE)
        const1 = data_conversion (1.).to(DEVICE)

        # consts (fixed here; treated as free constants in the next test)
        c  = data_conversion (3e8).to(DEVICE)
        M  = data_conversion (1e6).to(DEVICE)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",  # or ["mul", "neg", "inv", "sin"]
                        "use_protected_ops"    : False,
                        # input variables
                        "input_var_ids"        : {"x" : 0         , "v" : 1          , "t" : 2,        },
                        "input_var_units"      : {"x" : [1, 0, 0] , "v" : [1, -1, 0] , "t" : [0, 1, 0] },
                        "input_var_complexity" : {"x" : 0.        , "v" : 1.         , "t" : 0.,       },
                        # constants
                        "constants"            : {"pi" : pi        , "1" : const1    , "c" : c         , "M" : M         },
                        "constants_units"      : {"pi" : [0, 0, 0] , "1" : [0, 0, 0] , "c" : [1, -1, 0], "M" : [0, 0, 1] },
                        "constants_complexity" : {"pi" : 0.        , "1" : 1.        , "c" : 0.        , "M" : 1.        },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [1, -2, 1], superparent_name = "y")

        # PROGRAM
        test_program_str = ["mul", "mul", "M", "n2", "c", "sub", "inv", "sqrt", "sub", "1", "div", "n2", "v", "n2",
                            "c", "cos", "div", "sub", "1", "div", "v", "c", "div", "div", "x", "t", "c"]
        test_program     = [my_lib.lib_name_to_token[name] for name in test_program_str]
        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=1)
        prog = Prog.Program(tokens=test_program, library=my_lib, free_consts=free_const_table, n_realizations=1)
        # EXPECTED RES
        expected_res     = M*(c**2)*(1./torch.sqrt(1.-(v**2)/(c**2))-torch.cos((1.-(v/c))/((x/t)/c)))

        N = 100
        # EXECUTION
        t0 = time.perf_counter()
        for _ in range (N):
            res = prog.execute(data)
        t1 = time.perf_counter()
        print("\nprog.execute time = %.3f ms"%((t1-t0)*1e3/N))

        # EXECUTION (without tokens: direct torch expression as reference)
        t0 = time.perf_counter()
        for _ in range (N):
            expected_res     = M*(c**2)*(1./torch.sqrt(1.-(v**2)/(c**2))-torch.cos((1.-(v/c))/((x/t)/c)))
        t1 = time.perf_counter()
        print("\nprog.execute time (wo tokens) = %.3f ms"%((t1-t0)*1e3/N))

        # TEST
        works_bool = np.array_equal(data_conversion_inv(res.cpu()), data_conversion_inv(expected_res.cpu()),)
        self.assertTrue(works_bool)
        return None

    # Test program execution on a complicated function
    def test_execution_with_free_consts (self):

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # DATA
        N = int(1e6)

        # input var
        x = data_conversion  (np.linspace(0.04, 4, N)  ).to(DEVICE)
        v = data_conversion  (np.linspace(0.10, 10, N) ).to(DEVICE)
        t = data_conversion  (np.linspace(0.06, 6, N)  ).to(DEVICE)
        data = torch.stack((x, v, t), axis=0)

        # consts
        pi = data_conversion (np.pi).to(DEVICE)
        const1 = data_conversion (1.).to(DEVICE)

        # free consts
        c  = data_conversion (3e8).to(DEVICE)
        M  = data_conversion (1e6).to(DEVICE)
        free_const_values = torch.stack((M, c), axis=0)
        # (M, c) in alphabetical order, as the library assigns ids based on that order

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",  # or ["mul", "neg", "inv", "sin"]
                        "use_protected_ops"    : False,
                        # input variables
                        "input_var_ids"        : {"x" : 0         , "v" : 1          , "t" : 2,        },
                        "input_var_units"      : {"x" : [1, 0, 0] , "v" : [1, -1, 0] , "t" : [0, 1, 0] },
                        "input_var_complexity" : {"x" : 0.        , "v" : 1.         , "t" : 0.,       },
                        # constants
                        "constants"            : {"pi" : pi        , "1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 0.        , "1" : 1.        },
                        # free constants
                        "free_constants"            : {"c"              , "M"             },
                        "free_constants_init_val"   : {"c" : 1.         , "M" : 1.        },
                        "free_constants_units"      : {"c" : [1, -1, 0] , "M" : [0, 0, 1] },
                        "free_constants_complexity" : {"c" : 0.         , "M" : 1.        },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [1, -2, 1], superparent_name = "y")

        # PROGRAM
        test_program_str = ["mul", "mul", "M", "n2", "c", "sub", "inv", "sqrt", "sub", "1", "div", "n2", "v", "n2",
                            "c", "cos", "div", "sub", "1", "div", "v", "c", "div", "div", "x", "t", "c"]
        test_program     = [my_lib.lib_name_to_token[name] for name in test_program_str]
        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=1)
        free_const_table.class_values[0] = free_const_values
        prog = Prog.Program(tokens=test_program, library=my_lib, free_consts=free_const_table, n_realizations=1)
        # EXPECTED RES
        expected_res     = M*(c**2)*(1./torch.sqrt(1.-(v**2)/(c**2))-torch.cos((1.-(v/c))/((x/t)/c)))

        N = 100
        # EXECUTION
        t0 = time.perf_counter()
        for _ in range (N):
            res = prog.execute(data)
        t1 = time.perf_counter()
        print("\nprog.execute time = %.3f ms"%((t1-t0)*1e3/N))

        # EXECUTION (without tokens: direct torch expression as reference)
        t0 = time.perf_counter()
        for _ in range (N):
            expected_res     = M*(c**2)*(1./torch.sqrt(1.-(v**2)/(c**2))-torch.cos((1.-(v/c))/((x/t)/c)))
        t1 = time.perf_counter()
        print("\nprog.execute time (wo tokens) = %.3f ms"%((t1-t0)*1e3/N))

        # TEST
        works_bool = np.array_equal(data_conversion_inv(res.cpu()), data_conversion_inv(expected_res.cpu()),)
        self.assertTrue(works_bool)
        return None

    # Test program execution in Class SR scenario
    def test_execution_with_class_and_spe_free_consts (self):

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # -------------------------------------- Making fake datasets --------------------------------------

        multi_X = []
        for n_samples in [90, 100, 110]:
            x1 = np.linspace(0, 10, n_samples)
            x2 = np.linspace(0, 1 , n_samples)
            X = np.stack((x1,x2),axis=0)
            X = torch.tensor(X).to(DEVICE)
            multi_X.append(X)
        multi_X = multi_X*10                         # (n_realizations,) of (n_dim, [n_samples depends on dataset],)

        n_samples_per_dataset = np.array([X.shape[1] for X in multi_X])
        n_all_samples = n_samples_per_dataset.sum()
        n_realizations = len(multi_X)
        def flatten_multi_data (multi_data,):
            """
            Flattens multiple datasets into a single one for vectorized evaluation.
            Parameters
            ----------
            multi_data : list of length (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                List of datasets to be flattened.
            Returns
            -------
            torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            """
            flattened_data = torch.cat(multi_data, axis=-1) # (..., n_all_samples)
            return flattened_data

        def unflatten_multi_data (flattened_data):
            """
            Unflattens a single dataset into multiple ones.
            Parameters
            ----------
            flattened_data : torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            Returns
            -------
            list of len (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                Unflattened data.
            """
            return list(torch.split(flattened_data, n_samples_per_dataset.tolist(), dim=-1)) # (n_realizations,) of (..., [n_samples depends on dataset],)

        # y_weights_per_dataset = np.array([0, 0.001, 1.0]*10) # Shows weights work
        y_weights_per_dataset = torch.tensor(np.array([1., 1., 1.]*10))
        multi_y_weights = [torch.full(size=(n_samples_per_dataset[i],), fill_value=y_weights_per_dataset[i]) for i in range (n_realizations)]
        y_weights_flatten = flatten_multi_data(multi_y_weights)

        multi_X_flatten = flatten_multi_data(multi_X)  # (n_dim, n_all_samples)

        # Making fake ideal parameters
        # n_spe_params   = 3
        # n_class_params = 2
        random_shift       = (np.random.rand(n_realizations,3)-0.5)*0.8
        ideal_spe_params   = torch.tensor(np.array([1.123, 0.345, 0.116]) + random_shift) # (n_realizations, n_spe_params,)
        ideal_class_params = torch.tensor(np.array([1.389, 1.005]))                       # (n_class_params, )

        ideal_spe_params_flatten = torch.cat(
            [torch.tile(ideal_spe_params[i], (n_samples_per_dataset[i],1)).transpose(0,1) for i in range (n_realizations)], # (n_realizations,) of (n_spe_params, [n_samples depends on dataset],)
            axis = 1
        ) # (n_spe_params, n_all_samples)

        ideal_class_params_flatten = torch.tile(ideal_class_params, (n_all_samples,1)).transpose(0,1) # (n_class_params, n_all_samples)

        def trial_func (X, params, class_params):
            y = params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
            return y

        y_ideals_flatten = trial_func (multi_X_flatten, ideal_spe_params_flatten, ideal_class_params_flatten) # (n_all_samples,)
        multi_y_ideals   = unflatten_multi_data(y_ideals_flatten)                                         # (n_realizations,) of (n_samples depends on dataset,)


        # params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
        # k0 * exp(-k1 * t) * cos(c0 * t + k2) + c1 * l
        # "add", "mul", "mul", "k0", "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l"

        k0_init = [9,10,11]*10 # np.full(n_realizations, 1.)
        # consts
        pi     = data_conversion (np.pi) .to(DEVICE)
        const1 = data_conversion (1.)    .to(DEVICE)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",
                        "use_protected_ops"    : True,
                        # input variables
                        "input_var_ids"        : {"t" : 0         , "l" : 1          },
                        "input_var_units"      : {"t" : [1, 0, 0] , "l" : [0, 1, 0]  },
                        "input_var_complexity" : {"t" : 0.        , "l" : 1.         },
                        # constants
                        "constants"            : {"pi" : pi        , "const1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "const1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 1.        , "const1" : 1.        },
                        # class free constants
                        "class_free_constants"            : {"c0"              , "c1"               },
                        "class_free_constants_init_val"   : {"c0" : 21.        , "c1"  : 22.         },
                        "class_free_constants_units"      : {"c0" : [-1, 0, 0] , "c1"  : [0, -1, 0] },
                        "class_free_constants_complexity" : {"c0" : 1.         , "c1"  : 1.         },
                        # spe free constants
                        "spe_free_constants"            : {"k0"              , "k1"               , "k2"               },
                        "spe_free_constants_init_val"   : {"k0" : k0_init    , "k1"  : 2.         , "k2"  : 3.         },
                        "spe_free_constants_units"      : {"k0" : [0, 0, 0]  , "k1"  : [-1, 0, 0] , "k2"  : [0, 0, 0]  },
                        "spe_free_constants_complexity" : {"k0" : 1.         , "k1"  : 1.         , "k2"  : 1.         },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")

        # TEST PROGRAMS
        test_programs_idx = []
        test_prog_str_0 = ["add", "mul", "mul", "k0"  , "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l", ]
        test_tokens_0 = [my_lib.lib_name_to_token[name] for name in test_prog_str_0]

        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        free_const_table.class_values[0] = ideal_class_params
        free_const_table.spe_values  [0] = ideal_spe_params.transpose(0,1) # (n_spe_params, n_realizations)

        prog = Prog.Program(tokens=test_tokens_0, library=my_lib, free_consts=free_const_table, n_realizations=n_realizations)


        # Test execution on all datasets in flattened form
        N = 1000
        t0 = time.perf_counter()
        for _ in range (N):
            y_computed_flatten = prog.execute(X                     = multi_X_flatten,
                                              n_samples_per_dataset = n_samples_per_dataset,
                                              )
        t1 = time.perf_counter()
        print("\nprog.execute time (flattened mdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))
        #multi_y_computed = unflatten_multi_data(y_computed_flatten)
        works_bool = (y_computed_flatten == y_ideals_flatten).all()
        self.assertTrue(works_bool)

        # Test execution on all datasets but one by one
        t0 = time.perf_counter()
        for _ in range (N):
            multi_y_computed = []
            for i in range(n_realizations):
                y_computed = prog.execute(X              = multi_X[i],
                                          i_realization  = i,
                                          )
                multi_y_computed.append(y_computed)
        t1 = time.perf_counter()
        print("\nprog.execute time (one-by-one mdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))

        for i in range(n_realizations):
            works_bool = (multi_y_computed[i] == multi_y_ideals[i]).all()
            self.assertTrue(works_bool)

        # # Sanity plot
        # fig, ax = plt.subplots(1,1,figsize=(10,5))
        # for i in range(n_realizations):
        #     ax.plot(multi_X[i][0], multi_y_ideals   [i].cpu().detach().numpy(), 'o', )
        #     ax.plot(multi_X[i][0], multi_y_computed [i].cpu().detach().numpy(), 'r-',)
        # ax.legend()
        # plt.show()
        # for i in range(n_realizations):
        #     mse = torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2)
        #     print("%i, mse = %f"%(i, mse))


        return None


    # Test program const optimization (normal SR scenario)
    def test_optimize (self):

        seed = 42
        np.random.seed(seed)
        torch.manual_seed(seed)

        ideal_class_params = torch.tensor([1.389, 1.005]) # (n_class_params, )

        # Synthetic data
        x0 = torch.linspace(0, 10, 1000)
        x1 = torch.linspace(-5, 1 , 1000)
        X = torch.stack((x0,x1),axis=0)
        y_ideals = ideal_class_params[0]*X[0] + ideal_class_params[1] + X[1]

        # consts
        pi     = data_conversion (np.pi)
        const1 = data_conversion (1.)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",  # or ["mul", "neg", "inv", "sin"]
                        "use_protected_ops"    : False,
                        # input variables
                        "input_var_ids"        : {"x0" : 0         , "x1" : 1         },
                        "input_var_units"      : {"x0" : [0, 0, 0] , "x1" : [0, 0, 0] },
                        "input_var_complexity" : {"x0" : 0.        , "x1" : 1.        },
                        # constants
                        "constants"            : {"pi" : pi        , "1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 0.        , "1" : 1.        },
                        # free constants
                        "free_constants"            : {"a"             , "b"             },
                        "free_constants_init_val"   : {"a" : 1.        , "b" : 1.        },
                        "free_constants_units"      : {"a" : [0, 0, 0] , "b" : [0, 0, 0] },
                        "free_constants_complexity" : {"a" : 0.        , "b" : 1.        },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")

        # PROGRAM
        test_program_str = ["add", "add", "mul", "a", "x0", "b", "x1",]
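        # i.e. y = a*x0 + b + x1 in prefix notation, matching y_ideals above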
        test_program     = [my_lib.lib_name_to_token[name] for name in test_program_str]
        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=1)
        prog = Prog.Program(tokens=test_program, library=my_lib, free_consts=free_const_table, n_realizations=1)

        # OPTIMIZATION
        history = prog.optimize_constants(X=X, y_target=y_ideals,)
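        # history presumably holds the optimization loss per step (cf. the commented-out
        # "ax.plot(history)" sanity plot in test_optimize_with_spe_free_consts below)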

        # Execution for results
        y_pred  = prog.execute(X=X,)
        # Testing that the optimization process was logged
        works_bool = (prog.free_consts.is_opti[0] == True) and (prog.free_consts.opti_steps[0] > 0)
        self.assertTrue(works_bool)
        # Testing that constants were recovered
        tol = 1e-6
        works_bool = (torch.abs(prog.free_consts.class_values[0] - ideal_class_params)<tol).all()
        self.assertTrue(works_bool)
        # Testing that MSE is low
        mse_tol = 1e-8
        mse = torch.mean((y_pred - y_ideals)**2)
        works_bool = (mse < mse_tol)
        self.assertTrue(works_bool)

        return None

    # Test program const optimization in Class SR scenario
    def test_optimize_with_spe_free_consts (self):

        seed = 42
        np.random.seed(seed)
        torch.manual_seed(seed)

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # -------------------------------------- Making fake datasets --------------------------------------

        multi_X = []
        for n_samples in [90, 100, 110]:
            x1 = np.linspace(0, 10, n_samples)
            x2 = np.linspace(0, 1 , n_samples)
            X = np.stack((x1,x2),axis=0)
            X = torch.tensor(X).to(DEVICE)
            multi_X.append(X)
        multi_X = multi_X*10                         # (n_realizations,) of (n_dim, [n_samples depends on dataset],)

        n_samples_per_dataset = np.array([X.shape[1] for X in multi_X])
        n_all_samples = n_samples_per_dataset.sum()
        n_realizations = len(multi_X)
        def flatten_multi_data (multi_data,):
            """
            Flattens multiple datasets into a single one for vectorized evaluation.
            Parameters
            ----------
            multi_data : list of length (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                List of datasets to be flattened.
            Returns
            -------
            torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            """
            flattened_data = torch.cat(multi_data, axis=-1) # (..., n_all_samples)
            return flattened_data

        def unflatten_multi_data (flattened_data):
            """
            Unflattens a single dataset into multiple ones.
            Parameters
            ----------
            flattened_data : torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            Returns
            -------
            list of len (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                Unflattened data.
            """
            return list(torch.split(flattened_data, n_samples_per_dataset.tolist(), dim=-1)) # (n_realizations,) of (..., [n_samples depends on dataset],)

        # y_weights_per_dataset = np.array([0, 0.001, 1.0]*10) # Shows weights work
        y_weights_per_dataset = torch.tensor(np.array([1., 1., 1.]*10))
        multi_y_weights = [torch.full(size=(n_samples_per_dataset[i],), fill_value=y_weights_per_dataset[i]) for i in range (n_realizations)]
        y_weights_flatten = flatten_multi_data(multi_y_weights)

        multi_X_flatten = flatten_multi_data(multi_X)  # (n_dim, n_all_samples)

        # Making fake ideal parameters
        # n_spe_params   = 3
        # n_class_params = 2
        random_shift       = (np.random.rand(n_realizations,3)-0.5)*0.8
        ideal_spe_params   = torch.tensor(np.array([1.123, 0.345, 0.116]) + random_shift) # (n_realizations, n_spe_params,)
        ideal_spe_params   = ideal_spe_params.transpose(0,1)                              # (n_spe_params, n_realizations)
        ideal_class_params = torch.tensor(np.array([1.389, 1.005]))                       # (n_class_params, )

        ideal_spe_params_flatten = torch.cat(
            [torch.tile(ideal_spe_params[:,i], (n_samples_per_dataset[i],1)).transpose(0,1) for i in range (n_realizations)], # (n_realizations,) of (n_spe_params, [n_samples depends on dataset],)
            axis = 1
        ) # (n_spe_params, n_all_samples)

        ideal_class_params_flatten = torch.tile(ideal_class_params, (n_all_samples,1)).transpose(0,1) # (n_class_params, n_all_samples)

        def trial_func (X, params, class_params):
            y = params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
            return y

        y_ideals_flatten = trial_func (multi_X_flatten, ideal_spe_params_flatten, ideal_class_params_flatten) # (n_all_samples,)
        multi_y_ideals   = unflatten_multi_data(y_ideals_flatten)                                         # (n_realizations,) of (n_samples depends on dataset,)


        # params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
        # k0 * exp(-k1 * t) * cos(c0 * t + k2) + c1 * l
        # "add", "mul", "mul", "k0", "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l"

        k0_init = [1.,1.,1.]*10 # np.full(n_realizations, 1.)
        # consts
        pi     = data_conversion (np.pi) .to(DEVICE)
        const1 = data_conversion (1.)    .to(DEVICE)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",
                        "use_protected_ops"    : True,
                        # input variables
                        "input_var_ids"        : {"t" : 0         , "l" : 1          },
                        "input_var_units"      : {"t" : [1, 0, 0] , "l" : [0, 1, 0]  },
                        "input_var_complexity" : {"t" : 0.        , "l" : 1.         },
                        # constants
                        "constants"            : {"pi" : pi        , "const1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "const1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 1.        , "const1" : 1.        },
                        # class free constants
                        "class_free_constants"            : {"c0"              , "c1"               },
                        "class_free_constants_init_val"   : {"c0" : 1.         , "c1"  : 1.         },
                        "class_free_constants_units"      : {"c0" : [-1, 0, 0] , "c1"  : [0, -1, 0] },
                        "class_free_constants_complexity" : {"c0" : 1.         , "c1"  : 1.         },
                        # spe free constants
                        "spe_free_constants"            : {"k0"              , "k1"               , "k2"               },
                        "spe_free_constants_init_val"   : {"k0" : k0_init    , "k1"  : 1.         , "k2"  : 1.         },
                        "spe_free_constants_units"      : {"k0" : [0, 0, 0]  , "k1"  : [-1, 0, 0] , "k2"  : [0, 0, 0]  },
                        "spe_free_constants_complexity" : {"k0" : 1.         , "k1"  : 1.         , "k2"  : 1.         },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")

        # TEST PROGRAM
        test_programs_idx = []
        test_prog_str_0 = ["add", "mul", "mul", "k0"  , "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l", ]
        test_tokens_0 = [my_lib.lib_name_to_token[name] for name in test_prog_str_0]
        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        prog = Prog.Program(tokens=test_tokens_0, library=my_lib, free_consts=free_const_table, n_realizations=n_realizations)

        # ------- Test optimization (all datasets flattened way) -------
        N = 10
        t0 = time.perf_counter()
        for _ in range (N):
            # Resetting free constants to initial values for timing
            free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
            prog.free_consts = free_const_table
            # Optimization
            history = prog.optimize_constants(X                     = multi_X_flatten,
                                              y_target              = y_ideals_flatten,
                                              n_samples_per_dataset = n_samples_per_dataset,
                                            )
        t1 = time.perf_counter()
        print("\nprog.optimize_constants time (flattened mdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))

        # Execution for results
        y_computed_flatten = prog.execute(X = multi_X_flatten, n_samples_per_dataset = n_samples_per_dataset,)
        multi_y_computed = unflatten_multi_data(y_computed_flatten)
        # Testing that the optimization process was logged
        works_bool = (prog.free_consts.is_opti[0] == True) and (prog.free_consts.opti_steps[0] > 0)
        self.assertTrue(works_bool)
        # Testing that constants were recovered
        tol = 5*1e-3
        works_bool = (torch.abs(prog.free_consts.class_values[0] - ideal_class_params)<tol).all()
        self.assertTrue(works_bool)
        works_bool = (torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)<tol).all()
        above_tol = torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)[(torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)>=tol)]
        self.assertTrue(works_bool, "above_tol = %s"%above_tol)
        # Testing that MSEs are low
        mse_tol = 1e-6
        MSEs = torch.tensor([torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2) for i in range(n_realizations)])
        works_bool = (MSEs < mse_tol).all()
        above_tol = MSEs[MSEs>=mse_tol]
        self.assertTrue(works_bool, "above_tol = %s"%above_tol)

        # ------- Test optimization (one-by-one) -------
        # It makes no sense to test this, as it would optimize the class free constants once per
        # realization even though they are supposed to be common to all realizations. There is no
        # point in optimizing them one by one even if this were faster. Commenting out for now.
        #
        # t0 = time.perf_counter()
        # for _ in range (N):
        #    # Resetting free constants to initial values for timing
        #    free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        #    prog.free_consts = free_const_table
        #    for i in range(n_realizations):
        #        # Optimization
        #        history = prog.optimize_constants(X             = multi_X[i],
        #                                          y_target      = multi_y_ideals[i],
        #                                          i_realization = i,
        #                                          )
        # t1 = time.perf_counter()
        # print("\nprog.optimize_constants time (one-by-one mdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))
        #
        # # Execution for results
        # y_computed_flatten = prog.execute(X = multi_X_flatten, n_samples_per_dataset = n_samples_per_dataset,)
        # multi_y_computed = unflatten_multi_data(y_computed_flatten)
        # # Testing that the optimization process was logged
        # works_bool = (prog.free_consts.is_opti[0] == True) and (prog.free_consts.opti_steps[0] > 0)
        # self.assertTrue(works_bool)
        # # Testing that constants were recovered
        # tol = 5*1e-3
        # works_bool = (torch.abs(prog.free_consts.class_values[0] - ideal_class_params)<tol).all()
        # self.assertTrue(works_bool)
        # works_bool = (torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)<tol).all()
        # above_tol = torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)[(torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)>=tol)]
        # self.assertTrue(works_bool, "above_tol = %s"%above_tol)
        # # Testing that MSEs are low
        # mse_tol = 1e-6
        # MSEs = torch.tensor([torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2) for i in range(n_realizations)])
        # works_bool = (MSEs < mse_tol).all()
        # above_tol = MSEs[MSEs>=mse_tol]
        # self.assertTrue(works_bool, "above_tol = %s"%above_tol)

        # ------- Sanity plot -------
        # fig, ax = plt.subplots(1,1,figsize=(10,5))
        # for i in range(n_realizations):
        #      ax.plot(multi_X[i][0], multi_y_ideals   [i].cpu().detach().numpy(), 'o', )
        #      ax.plot(multi_X[i][0], multi_y_computed [i].cpu().detach().numpy(), 'r-',)
        # ax.legend()
        #
        # fig, ax = plt.subplots(1, 1, figsize=(10, 5))
        # ax.plot(history)
        # plt.show()
        #
        # for i in range(n_realizations):
        #      mse = torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2)
        #      print("%i, mse = %f"%(i, mse))
        return None

    # Test program const optimization in Class SR scenario, with per-dataset weights
    def test_optimize_with_spe_free_consts_with_weights (self):

        seed = 42
        np.random.seed(seed)
        torch.manual_seed(seed)

        DEVICE = 'cpu'
        if torch.cuda.is_available():
            DEVICE = 'cuda'

        # -------------------------------------- Making fake datasets --------------------------------------

        multi_X = []
        for n_samples in [90, 100, 110]:
            x1 = np.linspace(0, 10, n_samples)
            x2 = np.linspace(0, 1 , n_samples)
            X = np.stack((x1,x2),axis=0)
            X = torch.tensor(X).to(DEVICE)
            multi_X.append(X)
        multi_X = multi_X*10                         # (n_realizations,) of (n_dim, [n_samples depends on dataset],)

        n_samples_per_dataset = np.array([X.shape[1] for X in multi_X])
        n_all_samples = n_samples_per_dataset.sum()
        n_realizations = len(multi_X)
        def flatten_multi_data (multi_data,):
            """
            Flattens multiple datasets into a single one for vectorized evaluation.
            Parameters
            ----------
            multi_data : list of length (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                List of datasets to be flattened.
            Returns
            -------
            torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            """
            flattened_data = torch.cat(multi_data, axis=-1) # (..., n_all_samples)
            return flattened_data

        def unflatten_multi_data (flattened_data):
            """
            Unflattens a single dataset into multiple ones.
            Parameters
            ----------
            flattened_data : torch.tensor of shape (..., n_all_samples)
                Flattened data (n_all_samples = sum([n_samples depends on dataset])).
            Returns
            -------
            list of len (n_realizations,) of torch.tensor of shape (..., [n_samples depends on dataset],)
                Unflattened data.
            """
            return list(torch.split(flattened_data, n_samples_per_dataset.tolist(), dim=-1)) # (n_realizations,) of (..., [n_samples depends on dataset],)

        y_weights_per_dataset = np.array([0, 0.001, 1.0]*10) # Shows weights work
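        # With these weights, weight-0 realizations should contribute nothing to the fit
        # and weight-0.001 ones almost nothing, so constant recovery must be driven by
        # the weight-1.0 datasets.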
918
        # y_weights_per_dataset = torch.tensor(np.array([1., 1., 1.]*10))
919
        multi_y_weights = [torch.full(size=(n_samples_per_dataset[i],), fill_value=y_weights_per_dataset[i]) for i in range (n_realizations)]
×
920
        y_weights_flatten = flatten_multi_data(multi_y_weights)
×
921

922
        multi_X_flatten = flatten_multi_data(multi_X)  # (n_dim, n_all_samples)
×
923

924
        # Making fake ideal parameters
925
        # n_spe_params   = 3
926
        # n_class_params = 2
927
        random_shift       = (np.random.rand(n_realizations,3)-0.5)*0.8
×
928
        ideal_spe_params   = torch.tensor(np.array([1.123, 0.345, 0.116]) + random_shift) # (n_realizations, n_spe_params,)
×
929
        ideal_spe_params   = ideal_spe_params.transpose(0,1)                              # (n_spe_params, n_realizations)
×
930
        ideal_class_params = torch.tensor(np.array([1.389, 1.005]))                       # (n_class_params, )
×
931

932
        ideal_spe_params_flatten = torch.cat(
×
933
            [torch.tile(ideal_spe_params[:,i], (n_samples_per_dataset[i],1)).transpose(0,1) for i in range (n_realizations)], # (n_realizations,) of (n_spe_params, [n_samples depends on dataset],)
934
            axis = 1
935
        ) # (n_spe_params, n_all_samples)
936

937
        ideal_class_params_flatten = torch.tile(ideal_class_params, (n_all_samples,1)).transpose(0,1) # (n_class_params, n_all_samples)
×
938

939
        def trial_func (X, params, class_params):
×
940
            y = params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
×
941
            return y
×
942

943
        y_ideals_flatten = trial_func (multi_X_flatten, ideal_spe_params_flatten, ideal_class_params_flatten) # (n_all_samples,)
×
944
        multi_y_ideals   = unflatten_multi_data(y_ideals_flatten)                                         # (n_realizations,) of (n_samples depends on dataset,)
×
945

946

947
        # params[0]*torch.exp(-params[1]*X[0])*torch.cos(class_params[0]*X[0]+params[2]) + class_params[1]*X[1]
948
        # k0 * exp(-k1 * t) * cos(c0 * t + k2) + c1 * l
949
        # "add", "mul", "mul", "k0", "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l"
950

        k0_init = [1.,1.,1.]*10 # np.full(n_realizations, 1.)
        # consts
        pi     = data_conversion (np.pi) .to(DEVICE)
        const1 = data_conversion (1.)    .to(DEVICE)

        # LIBRARY CONFIG
        args_make_tokens = {
                        # operations
                        "op_names"             : "all",
                        "use_protected_ops"    : True,
                        # input variables
                        "input_var_ids"        : {"t" : 0         , "l" : 1          },
                        "input_var_units"      : {"t" : [1, 0, 0] , "l" : [0, 1, 0]  },
                        "input_var_complexity" : {"t" : 0.        , "l" : 1.         },
                        # constants
                        "constants"            : {"pi" : pi        , "const1" : const1    },
                        "constants_units"      : {"pi" : [0, 0, 0] , "const1" : [0, 0, 0] },
                        "constants_complexity" : {"pi" : 1.        , "const1" : 1.        },
                        # class free constants (shared across all realizations)
                        "class_free_constants"            : {"c0"              , "c1"               },
                        "class_free_constants_init_val"   : {"c0" : 1.         , "c1"  : 1.         },
                        "class_free_constants_units"      : {"c0" : [-1, 0, 0] , "c1"  : [0, -1, 0] },
                        "class_free_constants_complexity" : {"c0" : 1.         , "c1"  : 1.         },
                        # spe free constants (one value per realization)
                        "spe_free_constants"            : {"k0"              , "k1"               , "k2"               },
                        "spe_free_constants_init_val"   : {"k0" : k0_init    , "k1"  : 1.         , "k2"  : 1.         },
                        "spe_free_constants_units"      : {"k0" : [0, 0, 0]  , "k1"  : [-1, 0, 0] , "k2"  : [0, 0, 0]  },
                        "spe_free_constants_complexity" : {"k0" : 1.         , "k1"  : 1.         , "k2"  : 1.         },
                           }
        my_lib = Lib.Library(args_make_tokens = args_make_tokens,
                             superparent_units = [0, 0, 0], superparent_name = "y")
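        # Units sanity check: c0 carries the inverse units of t and c1 the inverse
        # units of l, so c0*t, k1*t and c1*l are all dimensionless, consistent with
        # the dimensionless superparent y ([0, 0, 0]).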

        # TEST PROGRAM
        test_programs_idx = []
        test_prog_str_0 = ["add", "mul", "mul", "k0"  , "exp", "mul", "neg", "k1", "t", "cos", "add", "mul", "c0", "t", "k2", "mul", "c1", "l", ]
        test_tokens_0 = [my_lib.lib_name_to_token[name] for name in test_prog_str_0]
        free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        prog = Prog.Program(tokens=test_tokens_0, library=my_lib, free_consts=free_const_table, n_realizations=n_realizations)
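        # batch_size=1 since a single candidate program is fitted here; the table
        # presumably keeps one slot of class/spe values per program in the batch,
        # which is why its results are read below through index [0].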

        # ------- Test optimization (all datasets flattened way) -------
        free_const_opti_args = {
            'loss': "MSE",
            'method': 'LBFGS',
            'method_args': {
                'n_steps': 120,
                'tol': 1e-16,
                'lbfgs_func_args': {
                    'max_iter': 4,
                    'line_search_fn': "strong_wolfe",
                },
            },
        }
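        # 'lbfgs_func_args' appear to be forwarded to torch.optim.LBFGS ('max_iter'
        # and 'line_search_fn' are both genuine LBFGS kwargs); 120 outer steps of at
        # most 4 inner iterations caps a fit at roughly 480 iterations unless the
        # 1e-16 tolerance triggers an early stop.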

        N = 10
        t0 = time.perf_counter()
        for _ in range (N):
            # Resetting free constants to initial values for timing
            free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
            prog.free_consts = free_const_table
            # Optimization
            history = prog.optimize_constants(X                     = multi_X_flatten,
                                              y_weights             = y_weights_flatten,
                                              y_target              = y_ideals_flatten,
                                              n_samples_per_dataset = n_samples_per_dataset,
                                              args_opti             = free_const_opti_args,
                                            )
        t1 = time.perf_counter()
        print("\nprog.optimize_constants time (flattened wmdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))
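        # Re-instantiating the table on every pass keeps the N timing runs independent:
        # without the reset, later passes would start from already-converged constants
        # and the per-run average printed above would be misleadingly fast.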

        # --------------------------- TESTS ---------------------------

        # Execution for results
        y_computed_flatten = prog.execute(X = multi_X_flatten, n_samples_per_dataset = n_samples_per_dataset,)
        multi_y_computed = unflatten_multi_data(y_computed_flatten)

        # --------- Testing that the optimization process was logged ---------
        works_bool = (prog.free_consts.is_opti[0] == True) and (prog.free_consts.opti_steps[0] > 0)
        self.assertTrue(works_bool)
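        # is_opti flags whether a program's constants went through optimization and
        # opti_steps counts the steps spent, so both should be set by the fits above
        # whatever the final fit quality.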

        # --------- Testing that constants were recovered ---------
        tol = 1*1e-3

        # -> Class free constants
        works_bool = (torch.abs(prog.free_consts.class_values[0] - ideal_class_params)<tol).all()
        self.assertTrue(works_bool)

        # -> Spe free constants
        spe_init = torch.tensor(prog.free_consts.library.spe_free_constants_init_val)           # (n_spe_params, n_realizations)
        # Checking that constants with high weights are recovered
        i_reals  = y_weights_per_dataset > 0.9
        is_recov = torch.abs(prog.free_consts.spe_values[0][:,i_reals] - ideal_spe_params[:,i_reals]) < tol
        works_bool = is_recov.all()
        self.assertTrue(works_bool)
        # Checking that constants with non-zero weights changed
        i_reals  = y_weights_per_dataset > 0.0
        is_recov = torch.abs(prog.free_consts.spe_values[0][:,i_reals] - spe_init[:,i_reals]) > 0.
        works_bool = is_recov.all()
        self.assertTrue(works_bool)
        # Checking that constants with zero weights did not change
        i_reals  = y_weights_per_dataset == 0.0
        is_recov = torch.abs(prog.free_consts.spe_values[0][:,i_reals] - spe_init[:,i_reals]) == 0
        works_bool = is_recov.all()
        self.assertTrue(works_bool)
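        # The zero-weight check works because samples with y_weights = 0 contribute
        # nothing to the weighted MSE, so (presumably) no gradient reaches the
        # corresponding spe constants and LBFGS leaves them exactly at their init values.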

        # --------- MSE ---------
        mse_tol = 1e-6
        MSEs = torch.tensor([torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2) for i in range(n_realizations)]) # (n_realizations,)

        # Testing that MSEs are low for high weights
        works_bool = (MSEs[y_weights_per_dataset > 0.5] < mse_tol).all()
        self.assertTrue(works_bool)

        # Testing that MSEs are high for zero weights
        works_bool = (MSEs[y_weights_per_dataset == 0.] > mse_tol).all()
        self.assertTrue(works_bool)
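        # Zero-weight realizations keep their (non-ideal) initial spe constants, so
        # their MSE against the ideal curves stays above mse_tol, making this a
        # negative control for the weighting mechanism.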

        # ------- Test optimization (one-by-one) -------
        # Testing this makes little sense: it would optimize the class free constants once per
        # realization, even though they are supposed to be shared across all realizations. There
        # is no point in optimizing them one by one even if it were faster. Commenting out for now.
        #
        # t0 = time.perf_counter()
        # for _ in range (N):
        #    # Resetting free constants to initial values for timing
        #    free_const_table = free_const.FreeConstantsTable(batch_size=1, library=my_lib, n_realizations=n_realizations)
        #    prog.free_consts = free_const_table
        #    for i in range(n_realizations):
        #        # Optimization
        #        history = prog.optimize_constants(X             = multi_X[i],
        #                                          y_target      = multi_y_ideals[i],
        #                                          i_realization = i,
        #                                          )
        # t1 = time.perf_counter()
        # print("\nprog.optimize_constants time (one-by-one wmdho2d scenario) = %.3f ms"%((t1-t0)*1e3/N))
        #
        # # Execution for results
        # y_computed_flatten = prog.execute(X = multi_X_flatten, n_samples_per_dataset = n_samples_per_dataset,)
        # multi_y_computed = unflatten_multi_data(y_computed_flatten)
        # # Testing that the optimization process was logged
        # works_bool = (prog.free_consts.is_opti[0] == True) and (prog.free_consts.opti_steps[0] > 0)
        # self.assertTrue(works_bool)
        # # Testing that constants were recovered
        # tol = 5*1e-3
        # works_bool = (torch.abs(prog.free_consts.class_values[0] - ideal_class_params)<tol).all()
        # self.assertTrue(works_bool)
        # works_bool = (torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)<tol).all()
        # above_tol = torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)[(torch.abs(prog.free_consts.spe_values[0] - ideal_spe_params)>=tol)]
        # self.assertTrue(works_bool, "above_tol = %s"%above_tol)
        # # Testing that MSEs are low
        # mse_tol = 1e-6
        # MSEs = torch.tensor([torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2) for i in range(n_realizations)])
        # works_bool = (MSEs < mse_tol).all()
        # above_tol = MSEs[MSEs>=mse_tol]
        # self.assertTrue(works_bool, "above_tol = %s"%above_tol)

        # ------- Sanity plot -------
        # fig, ax = plt.subplots(1,1,figsize=(10,5))
        # for i in range(n_realizations):
        #      ax.plot(multi_X[i][0], multi_y_ideals   [i].cpu().detach().numpy(), 'o', )
        #      ax.plot(multi_X[i][0], multi_y_computed [i].cpu().detach().numpy(), 'r-',)
        # ax.legend()
        #
        # fig, ax = plt.subplots(1, 1, figsize=(10, 5))
        # ax.plot(history)
        # plt.show()
        #
        # for i in range(n_realizations):
        #      mse = torch.mean((multi_y_computed[i] - multi_y_ideals[i])**2)
        #      print("%i, mse = %f"%(i, mse))
        return None

if __name__ == '__main__':
    unittest.main(verbosity=2)
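# To run only this file's tests (standard unittest invocation, assuming execution
# from the repository root):
#   python -m unittest -v physo.physym.tests.program_UnitTest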