mosesyhc / LCGP · build 16986452651 (push, via github)
15 Aug 2025 08:42AM UTC · coverage: 98.043% (+6.5%) from 91.538%
Commit by mosesyhc: re-add coveralls and add continue-on-error flag
451 of 460 relevant lines covered (98.04%) · 3.92 hits per line

Source file: /lcgp/lcgp.py (97.01% covered)
from ._import_util import _import_tensorflow
import tensorflow_probability as tfp
import gpflow
from .covmat import Matern32
import numpy as np

# kept for Python 3.9 compatibility
from typing import Optional

tf = _import_tensorflow()

# Display only code-breaking errors
tf.get_logger().setLevel('ERROR')
# Set default float type to float64
tf.keras.backend.set_floatx('float64')


class LCGP(gpflow.Module):
    def __init__(self,
                 y: Optional[np.ndarray] = None,
                 x: Optional[np.ndarray] = None,
                 q: Optional[int] = None,
                 var_threshold: Optional[float] = None,
                 diag_error_structure: Optional[list] = None,
                 parameter_clamp_flag: bool = False,
                 robust_mean: bool = True,
                 penalty_const: Optional[dict] = None,
                 submethod: str = 'full',
                 verbose: bool = False):
        """
        Constructor for the LCGP class.

        :param y: Outputs, of shape (output dimension, number of inputs).
        :param x: Inputs, of shape (number of inputs, input dimension).
        :param q: Number of latent components. Provide either q or
            var_threshold, but not both.
        :param var_threshold: Proportion of variance to be explained by the
            latent components; used to choose q when q is not given.
        :param diag_error_structure: Grouping of output dimensions that share
            an error variance; entries must sum to the output dimension.
        :param parameter_clamp_flag: Flag for clamping parameters.
        :param robust_mean: If True, standardize outputs with the median and
            median absolute deviation; otherwise use the mean and standard
            deviation.
        :param penalty_const: Dictionary of nonnegative penalty constants,
            with keys 'lLmb' and 'lLmb0'.
        :param submethod: Submethod for loss and prediction; currently 'full'.
        :param verbose: Flag for verbose output.
        """
        super().__init__()
        self.verbose = verbose
        self.robust_mean = robust_mean
        self.x = self._verify_data_types(x)
        self.y = self._verify_data_types(y)

        self.method = 'LCGP'
        self.submethod = submethod
        self.submethod_loss_map = {'full': self.neglpost,
                                   # 'elbo': self.negelbo,
                                   # 'proflik': self.negproflik
                                   }
        self.submethod_predict_map = {'full': self.predict_full,
                                      # 'elbo': self.predict_elbo,
                                      # 'proflik': self.predict_proflik
                                      }

        self.parameter_clamp_flag = parameter_clamp_flag

        if (q is not None) and (var_threshold is not None):
            raise ValueError('Include only q or var_threshold but not both.')
        self.q = q
        self.var_threshold = var_threshold

        # standardize x to unit hypercube
        self.x, self.x_min, self.x_max, self.x_orig, self.xnorm = \
            self.init_standard_x(self.x)
        # standardize y
        self.y, self.ymean, self.ystd, self.y_orig = self.init_standard_y(self.y)

        # placeholders for variables
        self.n, self.d, self.p = 0., 0., 0.
        # verify that input and output dimensions match; sets n, d, and p
        self.verify_dim(self.y, self.x)

        # reset q if none is provided
        self.g, self.phi, self.diag_D, self.q = \
            self.init_phi(var_threshold=var_threshold)

        if diag_error_structure is None:
            self.diag_error_structure = [1] * int(self.p)
        else:
            self.diag_error_structure = diag_error_structure

        self.verify_error_structure(self.diag_error_structure, self.y)

        # Initialize parameters
        self.lLmb = gpflow.Parameter(tf.ones([self.q, self.x.shape[1]], dtype=tf.float64),
                                     name='Latent GP log-scale',
                                     transform=tfp.bijectors.SoftClip(
                                         low=tf.constant(1e-6, dtype=tf.float64),
                                         high=tf.constant(1e4, dtype=tf.float64)
                                     ), dtype=tf.float64)
        self.lLmb0 = gpflow.Parameter(tf.ones([self.q], dtype=tf.float64),
                                      name='Latent GP log-lengthscale',
                                      transform=tfp.bijectors.SoftClip(
                                          low=tf.constant(1e-4, dtype=tf.float64),
                                          high=tf.constant(1e4, dtype=tf.float64)
                                      ), dtype=tf.float64)
        self.lsigma2s = gpflow.Parameter(tf.ones([len(self.diag_error_structure)], dtype=tf.float64),
                                         name='Diagonal error log-variance')  # , transform=tfp.bijectors.Exp())
        self.lnugGPs = gpflow.Parameter(tf.ones([self.q], dtype=tf.float64) * 1e-6,
                                        name='Latent GP nugget scale',
                                        transform=tfp.bijectors.SoftClip(
                                            low=tf.math.exp(tf.constant(-16, dtype=tf.float64)),
                                            high=tf.math.exp(tf.constant(-2, dtype=tf.float64))
                                        ), dtype=tf.float64)

        if penalty_const is None:
            pc = {'lLmb': 40, 'lLmb0': 5}
        else:
            pc = penalty_const
            for k, v in pc.items():
                assert v >= 0, 'penalty constant should be nonnegative.'
        self.penalty_const = pc

        self.init_params()

        # placeholders for predictive quantities
        self.CinvMs = tf.fill([self.q, self.n], tf.constant(float('nan'), dtype=tf.float64))
        self.Ths = tf.fill([self.q, self.n, self.n], tf.constant(float('nan'), dtype=tf.float64))
        self.Th_hats = tf.fill([self.q, self.n, self.n], tf.constant(float('nan'), dtype=tf.float64))
        self.Cinvhs = tf.fill([self.q, self.n, self.n], tf.constant(float('nan'), dtype=tf.float64))

    @staticmethod
    def init_standard_x(x):
        """
        Standardizes training inputs and collects summary information.
        """
        x_max = tf.reduce_max(x, axis=0)
        x_min = tf.reduce_min(x, axis=0)
        xs = (x - x_min) / (x_max - x_min)

        # xnorm[j] is the mean positive pairwise distance along dimension j
        xnorm = tf.zeros(x.shape[1], dtype=tf.float64)
        for j in range(x.shape[1]):
            xdist = tf.abs(tf.reshape(x[:, j], (-1, 1)) - x[:, j])

            positive_xdist = tf.boolean_mask(xdist, xdist > 0)
            mean_val = tf.reduce_mean(positive_xdist)

            xnorm = tf.tensor_scatter_nd_update(xnorm, [[j]], [mean_val])
        return xs, x_min, x_max, x, xnorm

    def init_standard_y(self, y):
        """
        Standardizes outputs and collects summary information.
        """
        if self.robust_mean:
            # robust centering and spread: median and median absolute deviation
            ycenter = tfp.stats.percentile(y, 50.0, axis=1, keepdims=True)
            yspread = tfp.stats.percentile(tf.abs(y - ycenter), 50.0, axis=1, keepdims=True)
        else:
            ycenter = tf.reduce_mean(y, axis=1, keepdims=True)
            yspread = tf.math.reduce_std(y, axis=1, keepdims=True)

        ys = (y - ycenter) / yspread
        return ys, ycenter, yspread, y

    def __repr__(self):
        params = gpflow.utilities.tabulate_module_summary(self)
        desc = 'LCGP(\n' \
               '\tsubmethod:\t{:s}\n' \
               '\toutput dimension:\t{:d}\n' \
               '\tnumber of latent components:\t{:d}\n' \
               '\tparameter_clamping:\t{:s}\n' \
               '\trobust_standardization:\t{:s}\n' \
               '\tdiagonal_error structure:\t{:s}\n' \
               '\tparameters:\t\n{}\n)'.format(self.submethod, self.p,
                                               self.q, str(self.parameter_clamp_flag),
                                               str(self.robust_mean),
                                               str(self.diag_error_structure),
                                               params)
        return desc

    def init_phi(self, var_threshold: float = None):
        """
        Initialization of orthogonal basis, computed with singular value
        decomposition.
        """
        y, q = self.y, self.q
        n, p = self.n, self.p

        singvals, left_u, _ = tf.linalg.svd(y, full_matrices=False)

        if (q is None) and (var_threshold is None):
            q = p
        elif (q is None) and (var_threshold is not None):
            cumvar = tf.cumsum(singvals ** 2) / tf.reduce_sum(singvals ** 2)
            q = int(tf.argmax(cumvar > var_threshold) + 1)

        assert left_u.shape[1] == min(n, p)
        singvals = singvals[:q]

        # Compute phi and diag_D
        phi = left_u[:, :q] * tf.sqrt(tf.cast(n, tf.float64)) / singvals
        diag_D = tf.reduce_sum(phi ** 2, axis=0)

        g = tf.matmul(phi, y, transpose_a=True)
        return g, phi, diag_D, q
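
    # Editorial note on the construction above (not part of the original
    # source): with the thin SVD y = U S V^T, phi = U[:, :q] * sqrt(n) / S[:q]
    # holds scaled left singular vectors, so diag_D[k] = n / S[k]^2 is the
    # squared norm of column k of phi, and g = phi^T y recovers the latent
    # component scores used by the model.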

    def init_params(self):
        """
        Initializes parameters for LCGP.
        """
        x = self.x
        d = self.d

        llmb = np.exp(0.5 * np.log(d) + np.log(np.std(x, axis=0)))
        lLmb = np.tile(llmb, self.q).reshape((self.q, d))
        lLmb0 = np.ones(self.q, dtype=np.float64)
        lnugGPs = np.exp(-10.) * np.ones(self.q, dtype=np.float64)

        err_struct = self.diag_error_structure
        lsigma2_diag = np.zeros(len(err_struct), dtype=np.float64)
        col = 0
        for k in range(len(err_struct)):
            lsigma2_diag[k] = np.log(np.var(self.y[col:(col + err_struct[k])]))
            col += err_struct[k]

        self.lLmb.assign(lLmb)
        self.lLmb0.assign(lLmb0)
        self.lnugGPs.assign(lnugGPs)
        self.lsigma2s.assign(lsigma2_diag)
        return

    def verify_dim(self, y, x):
        """
        Verifies that input and output dimensions match and sets the class
        variables for dimensions. Raises an error if the dimensions do not
        match.
        """
        p, ny = tf.shape(y)[0], tf.shape(y)[1]
        nx, d = tf.shape(x)[0], tf.shape(x)[1]

        assert ny == nx, 'Number of inputs (x) differs from number of outputs (y), y.shape[1] != x.shape[0]'

        self.n = tf.constant(nx, tf.int32)
        self.d = tf.constant(d, tf.int32)
        self.p = tf.constant(p, tf.int32)
        return

    def tx_x(self, xs):
        """
        Reverts standardization of inputs.
        """
        return xs * (self.x_max - self.x_min) + self.x_min

    def tx_y(self, ys):
        """
        Reverts output standardization.
        """
        return ys * self.ystd + self.ymean

    def fit(self, verbose=False):
        """
        Fits the LCGP model by minimizing the loss with gpflow's Scipy optimizer.
        """
        opt = gpflow.optimizers.Scipy()
        opt.minimize(self.loss, self.trainable_variables)
        return

    def loss(self):
        """
        Computes the loss based on the submethod.
        """
        if self.submethod == 'full':
            return self.neglpost()
        # elif self.submethod == 'elbo':
        #     return self.negelbo()
        # elif self.submethod == 'proflik':
        #     return self.negproflik()
        else:
            raise ValueError("Invalid submethod. Only 'full' is currently supported.")

    @tf.function
    def neglpost(self):
        """
        Computes the negative log-posterior, the loss for the 'full' submethod.
        """
        lLmb, lLmb0, lsigma2s, lnugGPs = self.get_param()
        x = self.x
        y = self.y

        pc = self.penalty_const

        n = self.n
        q = self.q
        D = self.diag_D
        phi = self.phi
        psi_c = tf.transpose(phi) / tf.sqrt(tf.exp(lsigma2s))

        nlp = tf.constant(0., dtype=tf.float64)

        for k in range(q):
            Ck = Matern32(x, x, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k])

            Wk, Uk = tf.linalg.eigh(Ck)

            Qk = tf.matmul(Uk, tf.matmul(tf.linalg.diag(1 / (D[k] + 1 / Wk)), tf.transpose(Uk)))
            Pk = tf.matmul(tf.expand_dims(psi_c[k], axis=1), tf.expand_dims(psi_c[k], axis=0))

            yQk = tf.matmul(y, Qk)
            yPk = tf.matmul(tf.transpose(y), tf.transpose(Pk))

            nlp += (0.5 * tf.reduce_sum(tf.math.log(1 + D[k] * Wk)))
            nlp += -(0.5 * tf.reduce_sum(yQk * tf.transpose(yPk)))

        nlp += (n / 2 * tf.reduce_sum(lsigma2s))
        nlp += (0.5 * tf.reduce_sum(tf.square(tf.transpose(y) / tf.sqrt(tf.exp(lsigma2s)))))

        # Regularization
        nlp += (pc['lLmb'] * tf.reduce_sum(tf.square(tf.math.log(lLmb))) +
                pc['lLmb0'] * (2 / n) * tf.reduce_sum(tf.square(lLmb0.unconstrained_variable)))
        nlp += (-tf.reduce_sum(tf.math.log(tf.math.log(lnugGPs) + 100)))
        # nlp += (tf.reduce_sum(tf.math.log(lnugGPs - 100)))
        nlp /= tf.cast(n, tf.float64)
        return nlp
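
    # Editorial sketch of the objective above (not part of the original
    # source): with the eigendecomposition C_k = U_k diag(W_k) U_k^T, the loop
    # accumulates, per latent component, 0.5 * log det(I + D_k C_k) via
    # 0.5 * sum(log(1 + D_k * W_k)), minus a quadratic form in y built from
    # Q_k = U_k diag(1 / (D_k + 1/W_k)) U_k^T and the rank-one matrix
    # P_k = psi_c[k] psi_c[k]^T. The remaining terms add the Gaussian
    # observation-error contribution and the hyperparameter penalties, and the
    # total is scaled by 1/n.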

    # def negelbo(self):
    #     n = self.n
    #     x = self.x
    #     y = self.y
    #     pc = self.penalty_const
    #
    #     lLmb, lLmb0, lsigma2s, lnugGPs = self.get_param()
    #     B = tf.matmul(tf.transpose(y / tf.sqrt(tf.exp(lsigma2s))), self.phi)
    #     D = self.diag_D
    #     phi = self.phi
    #
    #     psi = tf.transpose(phi) * tf.sqrt(tf.exp(lsigma2s))
    #
    #     M = tf.zeros([self.q, n], dtype=tf.float64)
    #
    #     negelbo = tf.constant(0., dtype=tf.float64)
    #     for k in range(self.q):
    #         Ck = Matern32(x, x, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k])
    #
    #         Wk, Uk = tf.linalg.eigh(Ck)
    #         dkInpCkinv = tf.matmul(Uk, tf.matmul(tf.linalg.diag(1 / Wk), tf.transpose(Uk))) + \
    #                      D[k] * tf.eye(n, dtype=tf.float64)
    #
    #         # (dk * I + Ck^{-1})^{-1}
    #         dkInpCkinv_inv = tf.matmul(Uk, tf.matmul(tf.linalg.diag(1 / (D[k] + 1 / Wk)), tf.transpose(Uk)))
    #         Mk = tf.linalg.matvec(dkInpCkinv_inv, tf.transpose(B)[k])
    #         Vk = 1 / tf.linalg.diag_part(dkInpCkinv)
    #
    #         CkinvhMk = tf.linalg.matvec(tf.matmul(tf.matmul(Uk, tf.linalg.diag(1 / tf.sqrt(Wk))), tf.transpose(Uk)),
    #                                     Mk)
    #
    #         M = tf.tensor_scatter_nd_update(M, [[k]], tf.expand_dims(Mk, axis=0))
    #
    #         negelbo += 0.5 * tf.reduce_sum(tf.math.log(Wk))
    #         negelbo += 0.5 * tf.reduce_sum(tf.square(CkinvhMk))
    #         negelbo -= 0.5 * tf.reduce_sum(tf.math.log(Vk))
    #         negelbo += 0.5 * tf.reduce_sum(
    #             Vk * D[k] * tf.linalg.diag_part(tf.matmul(Uk, tf.matmul(tf.linalg.diag(1 / Wk), tf.transpose(Uk)))))
    #
    #     resid = (tf.transpose(y) - tf.matmul(tf.transpose(M), psi)) / tf.sqrt(tf.exp(lsigma2s))
    #
    #     negelbo += 0.5 * tf.reduce_sum(tf.square(resid))
    #     negelbo += n / 2 * tf.reduce_sum(lsigma2s)
    #
    #     # Regularization
    #     negelbo += pc['lLmb'] * tf.reduce_sum(tf.square(lLmb)) + \
    #                pc['lLmb0'] * (2 / n) * tf.reduce_sum(tf.square(lLmb0))
    #     negelbo += -tf.reduce_sum(tf.math.log(lnugGPs + 100))
    #
    #     negelbo /= tf.cast(n, tf.float64)
    #
    #     return negelbo
    #
    # def negproflik(self):
    #     lLmb, lLmb0, lsigma2s, lnugGPs = self.get_param()
    #     x = self.x
    #     y = self.y
    #
    #     pc = self.penalty_const
    #
    #     n = self.n
    #     q = self.q
    #     D = self.diag_D
    #     phi = self.phi
    #     psi = tf.transpose(phi) * tf.sqrt(tf.exp(lsigma2s))
    #
    #     B = tf.matmul(tf.transpose(y / tf.sqrt(tf.exp(lsigma2s))), self.phi)
    #     G = tf.zeros([self.q, n], dtype=tf.float64)
    #
    #     negproflik = tf.constant(0., dtype=tf.float64)
    #
    #     for k in range(q):
    #         Ck = Matern32(x, x, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k])
    #         Wk, Uk = tf.linalg.eigh(Ck)
    #
    #         dkInpCkinv_inv = tf.matmul(Uk, tf.matmul(tf.linalg.diag(1 / (D[k] + 1 / Wk)), tf.transpose(Uk)))
    #         Gk = tf.matmul(dkInpCkinv_inv, tf.transpose(B)[k])
    #
    #         CkinvhGk = tf.matmul(tf.matmul(Uk, tf.linalg.diag(1 / Wk)), tf.transpose(Uk)) @ Gk
    #
    #         G = tf.tensor_scatter_nd_update(G, [[k]], tf.expand_dims(Gk, axis=0))
    #
    #         negproflik += 0.5 * tf.reduce_sum(tf.math.log(Wk))
    #         negproflik += 0.5 * tf.reduce_sum(tf.square(CkinvhGk))
    #
    #     resid = (tf.transpose(y) - tf.matmul(tf.transpose(G), psi)) / tf.sqrt(tf.exp(lsigma2s))
    #
    #     negproflik += 0.5 * tf.reduce_sum(tf.square(resid))
    #     negproflik += n / 2 * tf.reduce_sum(lsigma2s)
    #
    #     negproflik += pc['lLmb'] * tf.reduce_sum(tf.square(lLmb)) + \
    #                   pc['lLmb0'] * (2 / n) * tf.reduce_sum(tf.square(lLmb0))
    #     negproflik += -tf.reduce_sum(tf.math.log(lnugGPs + 100))
    #
    #     negproflik /= tf.cast(n, tf.float64)
    #     return negproflik

    def predict(self, x0, return_fullcov=False):
        """
        Returns predictive quantities at new input `x0`. Both outputs are of
        size (number of new inputs, output dimension).

        :param x0: New input of size (number of new inputs, dimension of input).
        :param return_fullcov: If True, returns (predictive mean, predictive
            variance, variance for the true mean, full predictive covariance);
            otherwise, only the first three quantities.
        """
        x0 = self._verify_data_types(x0)
        submethod = self.submethod
        predict_map = self.submethod_predict_map
        try:
            predict_call = predict_map[submethod]
        except KeyError as e:
            print(e)
            # print('Invalid submethod.  Choices are \'full\', \'elbo\', or \'proflik\'.')
            raise KeyError('Invalid submethod.  Choices are \'full\'.')
        return tf.stop_gradient(predict_call(x0=x0, return_fullcov=return_fullcov))

    def compute_aux_predictive_quantities(self):
        """
        Compute auxiliary quantities for predictions using full posterior approach.
        """
        x = self.x
        lLmb, lLmb0, lsigma2s, lnugGPs = self.get_param()

        D = self.diag_D
        # B := Y @ Sigma^{-1/2} @ Phi
        B = tf.matmul(tf.transpose(self.y) / tf.sqrt(tf.exp(lsigma2s)), self.phi)

        CinvM = tf.zeros([self.q, self.n], dtype=tf.float64)
        Th = tf.zeros([self.q, self.n, self.n], dtype=tf.float64)

        for k in range(self.q):
            Ck = Matern32(x, x, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k])

            Wk, Uk = tf.linalg.eigh(Ck)

            # (I + D_k * C_k)^{-1}
            IpdkCkinv = tf.matmul(Uk, tf.matmul(tf.linalg.diag(1.0 / (1.0 + D[k] * Wk)), tf.transpose(Uk)))

            CkinvMk = tf.linalg.matvec(IpdkCkinv, tf.transpose(B)[k])
            Thk = tf.matmul(Uk, tf.matmul(tf.linalg.diag(tf.sqrt((D[k] * Wk ** 2) / (Wk ** 2 + D[k] * Wk ** 3))),
                                          tf.transpose(Uk)))

            CinvM = tf.tensor_scatter_nd_update(CinvM, [[k]], tf.expand_dims(CkinvMk, axis=0))
            Th = tf.tensor_scatter_nd_update(Th, [[k]], tf.expand_dims(Thk, axis=0))

        self.CinvMs = CinvM
        self.Ths = Th

    def predict_full(self, x0, return_fullcov=False):
        """
        Returns predictions using full posterior approach.
        """
        if tf.reduce_any(tf.math.is_nan(self.CinvMs)) or tf.reduce_any(tf.math.is_nan(self.Ths)):
            self.compute_aux_predictive_quantities()

        x = self.x
        lLmb, lLmb0, lsigma2s, lnugGPs = self.get_param()

        phi = self.phi

        CinvM = self.CinvMs
        Th = self.Ths

        x0 = self._verify_data_types(x0)
        x0 = (x0 - self.x_min) / (self.x_max - self.x_min)  # Standardize x0
        n0 = tf.shape(x0)[0]

        ghat = tf.zeros([self.q, n0], dtype=tf.float64)
        gvar = tf.zeros([self.q, n0], dtype=tf.float64)
        for k in range(self.q):
            c00k = Matern32(x0, x0, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k],
                            diag_only=True)  # Diagonal-only covariance
            c0k = Matern32(x0, x, llmb=lLmb[k], llmb0=lLmb0[k], lnug=lnugGPs[k],
                           diag_only=False)

            ghat_k = tf.linalg.matvec(c0k, CinvM[k])
            gvar_k = c00k - tf.reduce_sum(tf.square(tf.matmul(c0k, Th[k])), axis=1)

            ghat = tf.tensor_scatter_nd_update(ghat, [[k]], [ghat_k])
            gvar = tf.tensor_scatter_nd_update(gvar, [[k]], [gvar_k])

        self.ghat = ghat
        self.gvar = gvar

        psi = tf.transpose(phi) * tf.sqrt(tf.exp(lsigma2s))

        predmean = tf.matmul(psi, ghat, transpose_a=True)
        confvar = tf.matmul(tf.transpose(gvar), tf.square(psi))
        predvar = confvar + tf.exp(lsigma2s)

        ypred = self.tx_y(predmean)
        yconfvar = tf.transpose(confvar) * tf.square(self.ystd)
        ypredvar = tf.transpose(predvar) * tf.square(self.ystd)

        if return_fullcov:
            CH = tf.sqrt(gvar)[..., tf.newaxis] * psi[tf.newaxis, ...]
            yfullpredcov = (tf.einsum('nij,njk->nik', CH,
                                      tf.transpose(CH, perm=[0, 2, 1])) +
                            tf.linalg.diag(tf.exp(lsigma2s)))
            yfullpredcov *= tf.square(self.ystd)
            return ypred, ypredvar, yconfvar, yfullpredcov

        return ypred, ypredvar, yconfvar
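
    # Editorial note (not part of the original source): in predict_full above,
    # `confvar` is the variance of the reconstructed mean surface alone, while
    # `predvar = confvar + exp(lsigma2s)` adds the diagonal observation-error
    # variance, so `ypredvar` covers new noisy observations rather than the
    # true mean (which `yconfvar` covers).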

    @staticmethod
    def _verify_data_types(t):
        """
        Verifies that inputs are TensorFlow tensors and casts them into
        tensors if not. Verifies that inputs are at least 2-dimensional and
        expands the dimensions to 2 if not.
        """
        if not isinstance(t, tf.Tensor):
            t = tf.convert_to_tensor(t, dtype=tf.float64)
        if t.ndim < 2:
            t = tf.expand_dims(t, axis=1)
        return t

    # def predict_elbo(self, x0, return_fullcov=False):
    #     pass
    #
    # def predict_proflik(self, x0, return_fullcov=False):
    #     pass

    @staticmethod
    def verify_error_structure(diag_error_structure, y):
        """
        Verifies that the diagonal error structure input, if any, is valid.
        """
        assert sum(diag_error_structure) == y.shape[0], \
            'Sum of error_structure should' \
            ' equal the output dimension.'

    def get_param(self):
        """
        Returns the parameters for the LCGP instance.
        """
        # if self.parameter_clamp_flag:
        #     lLmb, lLmb0, lsigma2s, lnugGPs = \
        #         self.parameter_clamp(lLmb=self.lLmb, lLmb0=self.lLmb0,
        #                              lsigma2s=self.lsigma2s, lnugs=self.lnugGPs)
        # else:
        lLmb, lLmb0, lsigma2s, lnugGPs = \
            self.lLmb, self.lLmb0, self.lsigma2s, self.lnugGPs

        # expand the grouped error variances to one entry per output dimension
        built_lsigma2s = tf.zeros(self.p, dtype=tf.float64)
        err_struct = self.diag_error_structure
        col = 0
        for k in range(len(err_struct)):
            built_lsigma2s = tf.tensor_scatter_nd_update(
                built_lsigma2s,
                tf.range(col, col + err_struct[k])[:, tf.newaxis],
                tf.fill([err_struct[k]], lsigma2s[k])
            )
            col += err_struct[k]

        return lLmb, lLmb0, built_lsigma2s, lnugGPs
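

# Appended for illustration: a minimal usage sketch of the class above. It is
# an editorial addition, not part of the covered source. It assumes the module
# is importable as `lcgp.lcgp` and uses synthetic data with the shapes expected
# by verify_dim: y of shape (output dimension, n), x of shape (n, input dimension).
#
#     import numpy as np
#     from lcgp.lcgp import LCGP  # assumed import path
#
#     rng = np.random.default_rng(0)
#     n, d, p = 50, 2, 3                  # number of inputs, input dim, output dim
#     x = rng.uniform(size=(n, d))        # inputs, shape (n, d)
#     y = np.stack([np.sin(x @ rng.normal(size=d)) + 0.05 * rng.normal(size=n)
#                   for _ in range(p)])   # outputs, shape (p, n)
#
#     model = LCGP(y=y, x=x, q=2)         # request two latent components
#     model.fit()                         # minimizes neglpost via gpflow's Scipy optimizer
#     ypred, ypredvar, yconfvar = model.predict(rng.uniform(size=(10, d)))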