• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

libKriging / libKriging / 16689350360

10 Jul 2025 07:01PM UTC coverage: 37.371%. Remained the same
16689350360

push

github

yannrichet
if () {cout} instead of if () cout

0 of 1 new or added line in 1 file covered. (0.0%)

278 existing lines in 1 file now uncovered.

3295 of 8817 relevant lines covered (37.37%)

59122.67 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/src/lib/NuggetKriging.cpp
1
// clang-format off
2
// MUST BE at the beginning before any other <cmath> include (e.g. in armadillo's headers)
3
#define _USE_MATH_DEFINES // required for Visual Studio
4

5
#include <cmath>
6
// clang-format on
7

8
#include "libKriging/utils/lk_armadillo.hpp"
9

10
#include "libKriging/Bench.hpp"
11
#include "libKriging/CacheFunction.hpp"
12
#include "libKriging/Covariance.hpp"
13
#include "libKriging/KrigingException.hpp"
14
#include "libKriging/LinearAlgebra.hpp"
15
#include "libKriging/NuggetKriging.hpp"
16
#include "libKriging/Optim.hpp"
17
#include "libKriging/Random.hpp"
18
#include "libKriging/Trend.hpp"
19
#include "libKriging/utils/custom_hash_function.hpp"
20
#include "libKriging/utils/data_from_arma_vec.hpp"
21
#include "libKriging/utils/jsonutils.hpp"
22
#include "libKriging/utils/nlohmann/json.hpp"
23
#include "libKriging/utils/utils.hpp"
24

25
#include <cassert>
26
#include <lbfgsb_cpp/lbfgsb.hpp>
27
#include <map>
28
#include <tuple>
29
#include <vector>
30

31
/************************************************/
32
/**      NuggetKriging implementation        **/
33
/************************************************/
34

35
// This will create the dist(xi,xj) function above. Need to parse "covType".
36
void NuggetKriging::make_Cov(const std::string& covType) {
×
37
  m_covType = covType;
×
38
  if (covType.compare("gauss") == 0) {
×
39
    _Cov = Covariance::Cov_gauss;
×
40
    _DlnCovDtheta = Covariance::DlnCovDtheta_gauss;
×
41
    _DlnCovDx = Covariance::DlnCovDx_gauss;
×
42
    _Cov_pow = 2;
×
43
  } else if (covType.compare("exp") == 0) {
×
44
    _Cov = Covariance::Cov_exp;
×
45
    _DlnCovDtheta = Covariance::DlnCovDtheta_exp;
×
46
    _DlnCovDx = Covariance::DlnCovDx_exp;
×
47
    _Cov_pow = 1;
×
48
  } else if (covType.compare("matern3_2") == 0) {
×
49
    _Cov = Covariance::Cov_matern32;
×
50
    _DlnCovDtheta = Covariance::DlnCovDtheta_matern32;
×
51
    _DlnCovDx = Covariance::DlnCovDx_matern32;
×
52
    _Cov_pow = 1.5;
×
53
  } else if (covType.compare("matern5_2") == 0) {
×
54
    _Cov = Covariance::Cov_matern52;
×
55
    _DlnCovDtheta = Covariance::DlnCovDtheta_matern52;
×
56
    _DlnCovDx = Covariance::DlnCovDx_matern52;
×
57
    _Cov_pow = 2.5;
×
58
  } else
59
    throw std::invalid_argument("Unsupported covariance kernel: " + covType);
×
60

61
  // arma::cout << "make_Cov done." << arma::endl;
62
}
×
63

64
// Covariance matrix between two sets of points given in original (unnormalized)
// coordinates: entries are sigma2 * k(xi, xj) with the fitted theta.
LIBKRIGING_EXPORT arma::mat NuggetKriging::covMat(const arma::mat& X1, const arma::mat& X2) {
  // Map both inputs into the normalized space used at fit time.
  arma::mat N1 = X1;
  N1.each_row() -= m_centerX;
  N1.each_row() /= m_scaleX;
  arma::mat N2 = X2;
  N2.each_row() -= m_centerX;
  N2.each_row() /= m_scaleX;

  const arma::uword n1 = N1.n_rows;
  const arma::uword n2 = N2.n_rows;
  arma::mat R = arma::mat(n1, n2, arma::fill::none);
  for (arma::uword j = 0; j < n2; j++) {
    for (arma::uword i = 0; i < n1; i++) {
      R.at(i, j) = _Cov((N1.row(i) - N2.row(j)).t(), m_theta);
    }
  }
  // Kernel is correlation-scaled; multiply by the process variance.
  return R * m_sigma2;
}
80

81
// Minimal constructor: only selects the covariance kernel.
// The model is unusable until fit() is called.
LIBKRIGING_EXPORT NuggetKriging::NuggetKriging(const std::string& covType) {
  make_Cov(covType);
}
85

86
/** Convenience constructor: select the kernel then immediately fit on (X, y).
 * @param y n-length output vector
 * @param X n*d input matrix (one row per observation)
 * @param covType covariance kernel name (see make_Cov)
 * @param regmodel trend/regression model for the GP mean
 * @param normalize whether to center/scale inputs and output
 * @param optim optimizer name, or "none" to keep given parameters
 * @param objective fit objective ("LL", "LMP", ...)
 * @param parameters starting (or fixed) hyper-parameter values
 * @throws std::runtime_error if y and X row counts disagree
 */
LIBKRIGING_EXPORT NuggetKriging::NuggetKriging(const arma::vec& y,
                                               const arma::mat& X,
                                               const std::string& covType,
                                               const Trend::RegressionModel& regmodel,
                                               bool normalize,
                                               const std::string& optim,
                                               const std::string& objective,
                                               const Parameters& parameters) {
  if (y.n_elem != X.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X.n_rows) + "x"
                             + std::to_string(X.n_cols) + "), y: (" + std::to_string(y.n_elem) + ")");

  make_Cov(covType);
  fit(y, X, regmodel, normalize, optim, objective, parameters);
}
101

102
// Explicit deep-copy constructor: ExplicitCopySpecifier is a tag type forcing
// callers to opt in to copying, delegating to the (private) copy constructor.
LIBKRIGING_EXPORT NuggetKriging::NuggetKriging(const NuggetKriging& other, ExplicitCopySpecifier)
    : NuggetKriging{other} {}
104

105
// Empty placeholder vector passed to LinearAlgebra::cholCov/update_cholCov
// where a weight-like argument is expected but unused by NuggetKriging.
arma::vec NuggetKriging::ones = arma::ones<arma::vec>(0);
106

107
/** Assemble the intermediate "KModel" quantities for given hyper-parameters.
 * Builds the correlation matrix R(theta, alpha), its Cholesky factor L, and
 * the whitened trend/response quantities (F*, y*, QR of [F*, y*], residual E*,
 * SSE*, and betahat when the trend is estimated).
 * @param theta correlation length-scales (one per input dimension)
 * @param alpha sigma2 / (sigma2 + nugget) ratio used to scale the diagonal
 * @param bench optional map receiving per-step timings (may be nullptr)
 */
NuggetKriging::KModel NuggetKriging::make_Model(const arma::vec& theta,
                                                const double alpha,
                                                std::map<std::string, double>* bench) const {
  // Locals only seed the aggregate below; all fields are overwritten via m.*.
  arma::mat R;
  arma::mat L;
  arma::mat Linv;
  arma::mat Fstar;
  arma::vec ystar;
  arma::mat Rstar;
  arma::mat Qstar;
  arma::vec Estar;
  double SSEstar{};
  arma::vec betahat;
  NuggetKriging::KModel m{R, L, Linv, Fstar, ystar, Rstar, Qstar, Estar, SSEstar, betahat};

  arma::uword n = m_X.n_rows;
  arma::uword d = m_X.n_cols;  // NOTE(review): unused here; kept for symmetry with sibling methods
  arma::uword p = m_F.n_cols;

  auto t0 = Bench::tic();
  m.R = arma::mat(n, n, arma::fill::none);
  // check if we want to recompute model for same theta, for augmented Xy (using cholesky fast update).
  bool update = false;
  if (!m_is_empty)
    update = (m_sigma2 / (m_sigma2 + m_nugget) == alpha) && (m_theta.size() == theta.size())
             && (theta - m_theta).is_zero() && (this->m_T.memptr() != nullptr) && (n > this->m_T.n_rows);
  if (update) {
    // Same (theta, alpha) on grown data: rank-update the previous Cholesky factor.
    m.L = LinearAlgebra::update_cholCov(&(m.R), m_dX, theta, _Cov, alpha, NuggetKriging::ones, m_T, m_R);
  } else
    m.L = LinearAlgebra::cholCov(&(m.R), m_dX, theta, _Cov, alpha, NuggetKriging::ones);
  t0 = Bench::toc(bench, "R = _Cov(dX) & L = Chol(R)", t0);

  // Compute intermediate useful matrices
  // Whiten trend matrix and response in one solve: [F*, y*] = L \ [F, y].
  arma::mat Fystar = LinearAlgebra::solve(m.L, arma::join_rows(m_F, m_y));
  t0 = Bench::toc(bench, "Fy* = L \\ [F,y]", t0);
  m.Fstar = Fystar.head_cols(p);
  m.ystar = Fystar.tail_cols(1);

  // QR of the joint [F*, y*]: the last column of R_qr carries the projection
  // of y* onto (and orthogonal to) the trend space.
  arma::mat Q_qr;
  arma::mat R_qr;
  arma::qr_econ(Q_qr, R_qr, Fystar);
  t0 = Bench::toc(bench, "Q_qr,R_qr = QR(Fy*)", t0);

  m.Rstar = R_qr.head_cols(p);
  m.Qstar = Q_qr.head_cols(p);
  // Residual of y* orthogonal to the trend columns, and its sum of squares.
  m.Estar = Q_qr.tail_cols(1) * R_qr.at(p, p);
  m.SSEstar = R_qr.at(p, p) * R_qr.at(p, p);

  if (m_est_beta) {
    // GLS trend coefficients from the triangular system R* betahat = R_qr[1:p, p+1].
    m.betahat = LinearAlgebra::solve(m.Rstar, R_qr.tail_cols(1));
    t0 = Bench::toc(bench, "^b = R* \\ R_qr[1:p, p+1]", t0);
  } else {
    m.betahat = arma::vec(p, arma::fill::zeros);  // whatever: not used
  }

  return m;
}
164

165
// Objective function for fit: log-likelihood (and its gradient on demand)

/** Profiled log-likelihood at (theta, alpha), alpha = sigma2/(sigma2+nugget).
 * Depending on which of sigma2/nugget are estimated, the free variance terms
 * are concentrated out analytically (DiceKriging "LLconcentration_beta_alpha").
 * @param _theta_alpha concatenation [theta (d), alpha (1)]
 * @param grad_out if non-null, filled with d/d(theta, alpha) of the log-likelihood
 * @param model if non-null, receives the intermediate KModel
 * @param bench optional per-step timing sink
 * @return the log-likelihood value
 */
double NuggetKriging::_logLikelihood(const arma::vec& _theta_alpha,
                                     arma::vec* grad_out,
                                     NuggetKriging::KModel* model,
                                     std::map<std::string, double>* bench) const {
  // arma::cout << " theta, alpha: " << _theta_alpha.t() << arma::endl;

  arma::uword d = m_X.n_cols;
  double _alpha = _theta_alpha.at(d);
  // closure: (alpha,sigma2,nugget)
  // * alpha = sigma2 / (nugget + sigma2)
  // * sigma2 = nugget * alpha / (1-alpha)
  // * nugget = sigma2 * (1-alpha) / alpha
  // Defaults to the current fitted values; overwritten below per estimation flags.
  double _sigma2 = m_sigma2;
  double _nugget = m_nugget;
  if (m_est_sigma2) {
    if (m_est_nugget) {
      // nothing to do now: use alpha as input, compute var = sigma2 + nugget later and then sigma2 and nugget
    } else {
      _sigma2 = m_nugget * _alpha / (1 - _alpha);
    }
  } else {
    if (m_est_nugget) {
      _nugget = m_sigma2 * (1 - _alpha) / _alpha;
    } else {
      _alpha
          = m_sigma2
            / (m_nugget + m_sigma2);  // force alpha value, and its derivative will be 0 (to force gradient convergence)
    }
  }
  arma::vec _theta = _theta_alpha.head(d);

  NuggetKriging::KModel m = make_Model(_theta, _alpha, bench);
  if (model != nullptr)
    *model = m;

  arma::uword n = m_X.n_rows;

  if (m_est_sigma2 && m_est_nugget) {  // DiceKriging: model@case == "LLconcentration_beta_alpha"
    // back to case 1: sigma2 + nugget = var (=z*z/n)
    double var = as_scalar(LinearAlgebra::crossprod(m.Estar)) / n;
    _sigma2 = _alpha * var;
    _nugget = (1 - _alpha) * var;
  }

  // ll = -1/2 [ n log(2 pi (sigma2+nugget)) + log det R + E*'E* / (sigma2+nugget) ]
  // using sigma2/alpha = sigma2 + nugget and log det R = 2 sum(log diag(L)).
  double ll = -0.5
              * (n * log(2 * M_PI * _sigma2 / _alpha) + 2 * sum(log(m.L.diag()))
                 + as_scalar(LinearAlgebra::crossprod(m.Estar)) * (_alpha / _sigma2));

  if (grad_out != nullptr) {
    auto t0 = Bench::tic();
    arma::vec terme1 = arma::vec(d);

    // Lazily compute L^-1 unless already cached in the model with matching size.
    if ((m.Linv.memptr() == nullptr) || (arma::size(m.Linv) != arma::size(m.L))) {
      m.Linv = LinearAlgebra::solve(m.L, arma::mat(n, n, arma::fill::eye));
      t0 = Bench::toc(bench, "L ^-1", t0);
    }

    arma::mat Rinv = LinearAlgebra::crossprod(m.Linv);
    t0 = Bench::toc(bench, "R^-1 = t(L^-1) * L^-1", t0);

    arma::mat x = LinearAlgebra::solve(m.L.t(), m.Estar);
    t0 = Bench::toc(bench, "x = tL \\ z", t0);

    // dR/dtheta_k, built symmetric from the strict lower triangle (zero diagonal).
    arma::cube gradR = arma::cube(n, n, d, arma::fill::none);
    const arma::vec zeros = arma::vec(d, arma::fill::zeros);
    for (arma::uword i = 0; i < n; i++) {
      gradR.tube(i, i) = zeros;
      for (arma::uword j = 0; j < i; j++) {
        gradR.tube(i, j) = m.R.at(i, j) * _DlnCovDtheta(m_dX.col(i * n + j), _theta);
        gradR.tube(j, i) = gradR.tube(i, j);
      }
    }
    t0 = Bench::toc(bench, "gradR = R * dlnCov(dX)", t0);

    for (arma::uword k = 0; k < d; k++) {
      t0 = Bench::tic();
      arma::mat gradR_k = gradR.slice(k);
      t0 = Bench::toc(bench, "gradR_k = gradR[k]", t0);

      // should make a fast function trace_prod(A,B) -> sum_i(sum_j(Ai,j*Bj,i))
      terme1.at(k) = as_scalar(x.t() * gradR_k * x) / (_sigma2 + _nugget);
      double terme2 = -arma::trace(Rinv * gradR_k);  //-arma::accu(Rinv % gradR_k_upper)
      (*grad_out).at(k) = (terme1.at(k) + terme2) / 2;
      t0 = Bench::toc(bench, "grad_ll[k] = xt * gradR_k / S2 + tr(Ri * gradR_k)", t0);
    }

    //  # partial derivative with respect to (alpha=) v = sigma^2 + delta^2
    //  dCdv <- R0 - diag(model@n)
    //  term1 <- -t(x) %*% dCdv %*% x / v
    //  term2 <- sum(Cinv * dCdv) # economic computation of trace(Cinv%*%C0)
    //  logLik.derivative[nparam] <- -0.5 * (term1 + term2) # /sigma2

    if (m_est_sigma2 && m_est_nugget) {
      arma::mat dRdv = m.R / _alpha;
      dRdv.diag().zeros();
      double _terme1 = -as_scalar((trans(x) * dRdv) * x) / (_sigma2 + _nugget);
      double _terme2 = arma::accu(arma::dot(Rinv, dRdv));
      (*grad_out).at(d) = -0.5 * (_terme1 + _terme2);
    } else if (m_est_sigma2 && !m_est_nugget) {
      arma::mat dRdv = m.R / _alpha;
      dRdv.diag().ones();
      double _terme1 = -as_scalar((trans(x) * dRdv) * x) / ((_sigma2 + _nugget) * (_sigma2 + _nugget));
      double _terme2 = arma::accu(arma::dot(Rinv / (_sigma2 + _nugget), dRdv));
      (*grad_out).at(d) = -0.5 * (_terme1 + _terme2) * _nugget / (1 - _alpha) / (1 - _alpha);
    } else                    // we do not support explicitely the case where nugget is estimated and sigma2 is fixed
      (*grad_out).at(d) = 0;  // if sigma2 and nugget are defined & fixed by user

    // arma::cout << " grad_out:" << *grad_out << arma::endl;
  }
  return ll;
}
278

279
/** Public wrapper around _logLikelihood.
 * @param _theta_alpha concatenation [theta (d), alpha (1)]
 * @param _grad if true, the gradient is computed and returned
 * @param _bench if true, per-step timings are printed to arma::cout
 * @return (log-likelihood, gradient) — gradient is empty when _grad is false
 */
LIBKRIGING_EXPORT std::tuple<double, arma::vec> NuggetKriging::logLikelihoodFun(const arma::vec& _theta_alpha,
                                                                                const bool _grad,
                                                                                const bool _bench) {
  double ll = -1;
  arma::vec grad;

  // Single call path: the bench map is only wired in when timings are requested.
  std::map<std::string, double> bench;
  std::map<std::string, double>* bench_sink = _bench ? &bench : nullptr;

  if (_grad) {
    grad = arma::vec(_theta_alpha.n_elem);
    ll = _logLikelihood(_theta_alpha, &grad, nullptr, bench_sink);
  } else {
    ll = _logLikelihood(_theta_alpha, nullptr, nullptr, bench_sink);
  }

  if (_bench) {
    // Align the timing table on the longest step label.
    size_t width = 0;
    for (const auto& kv : bench)
      width = std::max(kv.first.size(), width);
    for (const auto& kv : bench)
      arma::cout << "| " << Bench::pad(kv.first, width, ' ') << " | " << kv.second << " |" << arma::endl;
  }

  return std::make_tuple(ll, std::move(grad));
}
310

311
// Objective function for fit: bayesian-like approach fromm RobustGaSP
312

UNCOV
313
double NuggetKriging::_logMargPost(const arma::vec& _theta_alpha,
×
314
                                   arma::vec* grad_out,
315
                                   NuggetKriging::KModel* model,
316
                                   std::map<std::string, double>* bench) const {
317
  // arma::cout << " theta: " << _theta << arma::endl;
318

319
  // In RobustGaSP:
320
  // neg_log_marginal_post_approx_ref <- function(param,nugget,
321
  // nugget.est,R0,X,zero_mean,output,CL,a,b,kernel_type,alpha) {
322
  //  lml=log_marginal_lik(param,nugget,nugget.est,R0,X,zero_mean,output,kernel_type,alpha);
323
  //  lp=log_approx_ref_prior(param,nugget,nugget.est,CL,a,b);
324
  //  -(lml+lp)
325
  //}
326
  // double log_marginal_lik(const Vec param,double nugget, const bool nugget_est, const List R0, const
327
  // Eigen::Map<Eigen::MatrixXd> & X,const String zero_mean,const Eigen::Map<Eigen::MatrixXd> & output, Eigen::VectorXi
328
  // kernel_type,const Eigen::VectorXd alpha ){
329
  //  double nu=nugget;
330
  //  int param_size=param.size();
331
  //  Eigen::VectorXd beta= param.array().exp().matrix();
332
  //  ...beta
333
  //  R=R+nu*MatrixXd::Identity(num_obs,num_obs);  //not sure
334
  //
335
  //  LLT<MatrixXd> lltOfR(R);             // compute the cholesky decomposition of R called lltofR
336
  //  MatrixXd L = lltOfR.matrixL();   //retrieve factor L  in the decomposition
337
  //
338
  //  if(zero_mean=="Yes"){...}else{
339
  //
340
  //  int q=X.cols();
341
  //
342
  //  MatrixXd R_inv_X=L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(X)); //one forward
343
  //  and one backward to compute R.inv%*%X MatrixXd Xt_R_inv_X=X.transpose()*R_inv_X; //Xt%*%R.inv%*%X
344
  //
345
  //  LLT<MatrixXd> lltOfXRinvX(Xt_R_inv_X); // cholesky decomposition of Xt_R_inv_X called lltOfXRinvX
346
  //  MatrixXd LX = lltOfXRinvX.matrixL();  //  retrieve factor LX  in the decomposition
347
  //  MatrixXd Rinv_X_Xt_Rinv_X_inv_Xt_Rinv=
348
  //  R_inv_X*(LX.transpose().triangularView<Upper>().solve(LX.triangularView<Lower>().solve(R_inv_X.transpose())));
349
  //  //compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward and one backward solve MatrixXd yt_R_inv=
350
  //  (L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(output))).transpose(); MatrixXd S_2=
351
  //  (yt_R_inv*output-output.transpose()*Rinv_X_Xt_Rinv_X_inv_Xt_Rinv*output); double log_S_2=log(S_2(0,0)); return
352
  //  (-(L.diagonal().array().log().matrix().sum())-(LX.diagonal().array().log().matrix().sum())-(num_obs-q)/2.0*log_S_2);
353
  //  }
354
  //}
355
  // double log_approx_ref_prior(const Vec param,double nugget, bool nugget_est, const Eigen::VectorXd CL,const double
356
  // a,const double b ){
357
  //  double nu=nugget;
358
  //  int param_size=param.size();beta
359
  //  Eigen::VectorX beta= param.array().exp().matrix();
360
  //  ...
361
  //  double t=CL.cwiseProduct(beta).sum()+nu;
362
  //  return -b*t + a*log(t);
363
  //}
364

UNCOV
365
  arma::uword d = m_X.n_cols;
×
366
  double _alpha = _theta_alpha.at(d);
×
367
  // closure: (alpha,sigma2,nugget)
368
  // * alpha = sigma2 / (nugget + sigma2)
369
  // * sigma2 = nugget * alpha / (1-alpha)
370
  // * nugget = sigma2 * (1-alpha) / alpha
371
  double _sigma2;
372
  double _nugget;
UNCOV
373
  if (m_est_sigma2) {
×
374
    if (m_est_nugget) {
×
375
      // nothing to do now: use alpha as input, compute var = sigma2 + nugget later and then sigma2 and nugget
376
    } else {
UNCOV
377
      _sigma2 = m_nugget * _alpha / (1 - _alpha);
×
378
    }
379
  } else {
UNCOV
380
    if (m_est_nugget) {
×
381
      _nugget = m_sigma2 * (1 - _alpha) / _alpha;
×
382
    } else {
383
      _alpha
UNCOV
384
          = m_sigma2
×
385
            / (m_nugget + m_sigma2);  // force alpha value, and its derivative will be 0 (to force gradient convergence)
×
386
    }
387
  }
UNCOV
388
  arma::vec _theta = _theta_alpha.head(d);
×
389

UNCOV
390
  NuggetKriging::KModel m = make_Model(_theta, _alpha, bench);
×
391
  if (model != nullptr)
×
392
    *model = m;
×
393

UNCOV
394
  arma::uword n = m_X.n_rows;
×
395
  arma::uword p = m_F.n_cols;
×
396

397
  // RobustGaSP naming...
398
  // arma::mat X = m_F;
399
  // arma::mat L = fd->T;
400

UNCOV
401
  auto t0 = Bench::tic();
×
402
  // m.Fstar : fd->M = solve(L, X, LinearAlgebra::default_solve_opts);
403

404
  // arma::mat Rinv_X = solve(trans(L), fd->M, LinearAlgebra::default_solve_opts);
UNCOV
405
  arma::mat Rinv_X = LinearAlgebra::solve(m.L.t(), m.Fstar);
×
406

407
  // arma::mat Xt_Rinv_X = trans(X) * Rinv_X;  // Xt%*%R.inv%*%X
UNCOV
408
  arma::mat Xt_Rinv_X = m_F.t() * Rinv_X;
×
409

410
  // arma::mat LX = chol(Xt_Rinv_X, "lower");  //  retrieve factor LX  in the decomposition
UNCOV
411
  arma::mat LX = LinearAlgebra::safe_chol_lower(Xt_Rinv_X);
×
412

413
  // arma::mat Rinv_X_Xt_Rinv_X_inv_Xt_Rinv
414
  //     = Rinv_X
415
  //       * (solve(trans(LX),
416
  //                solve(LX, trans(Rinv_X), LinearAlgebra::default_solve_opts),
417
  //                LinearAlgebra::default_solve_opts));  // compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward
418
  arma::mat Rinv_X_Xt_Rinv_X_inv_Xt_Rinv
419
      = Rinv_X
UNCOV
420
        * (LinearAlgebra::solve(
×
421
            trans(LX),
×
422
            LinearAlgebra::solve(LX, trans(Rinv_X))));  // compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward
×
423

UNCOV
424
  arma::mat yt_Rinv = trans(solve(trans(m.L), m.ystar));
×
425
  t0 = Bench::toc(bench, "YtRi = Yt \\ Tt", t0);
×
426

UNCOV
427
  arma::mat S_2 = (yt_Rinv * m_y - trans(m_y) * Rinv_X_Xt_Rinv_X_inv_Xt_Rinv * m_y);
×
428
  t0 = Bench::toc(bench, "S2 = YtRi * y - yt * RiFFtRiFiFtRi * y", t0);
×
429

UNCOV
430
  if (m_est_sigma2 && m_est_nugget) {
×
431
    _sigma2 = S_2(0, 0) / (n - p);  //? - 2);
×
432
    _nugget = _sigma2 * (1 - _alpha) / _alpha;
×
433
  }
UNCOV
434
  double log_S_2 = log(_sigma2 * (n - p));
×
435

UNCOV
436
  double log_marginal_lik = -sum(log(m.L.diag())) - sum(log(LX.diag())) - (n - p) / 2.0 * log_S_2;
×
437
  t0 = Bench::toc(bench, "lml = -Sum(log(diag(T))) - Sum(log(diag(TF)))...", t0);
×
438
  // arma::cout << " log_marginal_lik:" << log_marginal_lik << arma::endl;
439

440
  // Default prior params
UNCOV
441
  double a = 0.2;
×
442
  double b = 1.0 / pow(n, 1.0 / d) * (a + d);
×
443
  // t0 = Bench::toc(bench,"b             ", t0);
444

UNCOV
445
  arma::vec CL = trans(max(m_X, 0) - min(m_X, 0)) / pow(n, 1.0 / d);
×
446
  t0 = Bench::toc(bench, "CL = (max(X) - min(X)) / n^1/d", t0);
×
447

UNCOV
448
  double t = arma::accu(CL / _theta) + _nugget / _sigma2;
×
449
  // arma::cout << " a:" << a << arma::endl;
450
  // arma::cout << " b:" << b << arma::endl;
451
  // arma::cout << " t:" << t << arma::endl;
UNCOV
452
  double log_approx_ref_prior = -b * t + a * log(t);
×
453
  // arma::cout << " log_approx_ref_prior:" << log_approx_ref_prior << arma::endl;
454

UNCOV
455
  if (grad_out != nullptr) {
×
456
    // Eigen::VectorXd log_marginal_lik_deriv(const Eigen::VectorXd param,double nugget,  bool nugget_est, const List
457
    // R0, const Eigen::Map<Eigen::MatrixXd> & X,const String zero_mean,const Eigen::Map<Eigen::MatrixXd> & output,
458
    // Eigen::VectorXi kernel_type,const Eigen::VectorXd alpha){
459
    // ...
460
    // VectorXd ans=VectorXd::Ones(param_size);
461
    // ...
462
    // MatrixXd Q_output= yt_R_inv.transpose()-Rinv_X_Xt_Rinv_X_inv_Xt_Rinv*output;
463
    // MatrixXd dev_R_i;
464
    // MatrixXd Wb_ti;
465
    // //allow different choices of kernels
466
    //
467
    // for(int ti=0;ti<p;ti++){
468
    //   //kernel_type_ti=kernel_type[ti];
469
    //   if(kernel_type[ti]==3){
470
    //     dev_R_i=matern_5_2_deriv( R0[ti],R_ori,beta[ti]);  //now here I have R_ori instead of R
471
    //   }else {...}
472
    //   Wb_ti=(L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(dev_R_i))).transpose()-dev_R_i*Rinv_X_Xt_Rinv_X_inv_Xt_Rinv;
473
    //   ans[ti]=-0.5*Wb_ti.diagonal().sum()+(num_obs-q)/2.0*(output.transpose()*Wb_ti.transpose()*Q_output/S_2(0,0))(0,0);
474
    // }
475

UNCOV
476
    t0 = Bench::tic();
×
477
    arma::vec ans = arma::vec(d, arma::fill::ones);
×
478
    arma::mat Q_output = trans(yt_Rinv) - Rinv_X_Xt_Rinv_X_inv_Xt_Rinv * m_y;
×
479
    t0 = Bench::toc(bench, "Qo = YtRi - RiFFtRiFiFtRi * y", t0);
×
480

UNCOV
481
    arma::cube gradR = arma::cube(n, n, d, arma::fill::zeros);
×
482
    // const arma::vec zeros = arma::vec(d,arma::fill::zeros);
UNCOV
483
    for (arma::uword i = 0; i < n; i++) {
×
484
      // gradR.tube(i, i) = zeros;
UNCOV
485
      for (arma::uword j = 0; j < i; j++) {
×
486
        gradR.tube(i, j) = m.R.at(i, j) * _DlnCovDtheta(m_dX.col(i * n + j), _theta);
×
487
        gradR.tube(j, i) = gradR.tube(i, j);
×
488
      }
489
    }
UNCOV
490
    t0 = Bench::toc(bench, "gradR = R * dlnCov(dX)", t0);
×
491

UNCOV
492
    arma::mat Wb_k;
×
493
    for (arma::uword k = 0; k < d; k++) {
×
494
      t0 = Bench::tic();
×
495
      arma::mat gradR_k = gradR.slice(k);
×
496
      t0 = Bench::toc(bench, "gradR_k = gradR[k]", t0);
×
497

UNCOV
498
      Wb_k = trans(LinearAlgebra::solve(trans(m.L), LinearAlgebra::solve(m.L, gradR_k)))
×
499
             - gradR_k * Rinv_X_Xt_Rinv_X_inv_Xt_Rinv;
×
500
      t0 = Bench::toc(bench, "Wb_k = gradR_k \\ L \\ Tt - gradR_k * RiFFtRiFiFtRi", t0);
×
501

UNCOV
502
      ans[k] = -sum(Wb_k.diag()) / 2.0 + as_scalar(trans(m_y) * trans(Wb_k) * Q_output) / (2.0 * _sigma2);
×
503
      t0 = Bench::toc(bench, "ans[k] = Sum(diag(Wb_k)) + yt * Wb_kt * Qo / S2...", t0);
×
504
    }
×
505
    // arma::cout << " log_marginal_lik_deriv:" << -ans * pow(_theta,2) << arma::endl;
506
    // arma::cout << " log_approx_ref_prior_deriv:" <<  a*CL/t - b*CL << arma::endl;
507

UNCOV
508
    (*grad_out).head(d) = ans - (a * CL / t - b * CL) / square(_theta);
×
509

UNCOV
510
    if (m_est_sigma2 || m_est_nugget) {
×
511
      arma::mat gradR_d = m.R / _alpha;
×
512
      gradR_d.diag().zeros();
×
513
      Wb_k = trans(LinearAlgebra::solve(trans(m.L), LinearAlgebra::solve(m.L, gradR_d)))
×
514
             - gradR_d * Rinv_X_Xt_Rinv_X_inv_Xt_Rinv;
×
515
      double ans_d = -sum(Wb_k.diag()) / 2.0 + as_scalar(trans(m_y) * trans(Wb_k) * Q_output) / (2.0 * _sigma2);
×
516
      (*grad_out).at(d) = ans_d - (a / t - b) / pow(_alpha, 2.0);
×
517
    } else
×
518
      (*grad_out).at(d) = 0;  // if sigma2 and nugget are defined & fixed by user
×
519
    // arma::cout << " grad_out:" << *grad_out << arma::endl;
UNCOV
520
  }
×
521

522
  // arma::cout << " lmp:" << (log_marginal_lik+log_approx_ref_prior) << arma::endl;
UNCOV
523
  return (log_marginal_lik + log_approx_ref_prior);
×
524
}
×
525

UNCOV
526
/** Public wrapper around _logMargPost.
 * @param _theta_alpha concatenation [theta (d), alpha (1)]
 * @param _grad if true, the gradient is computed and returned
 * @param _bench if true, per-step timings are printed to arma::cout
 * @return (log marginal posterior, gradient) — gradient is empty when _grad is false
 */
LIBKRIGING_EXPORT std::tuple<double, arma::vec> NuggetKriging::logMargPostFun(const arma::vec& _theta_alpha,
                                                                              const bool _grad,
                                                                              const bool _bench) {
  double lmp = -1;
  arma::vec grad;

  // Single call path: the bench map is only wired in when timings are requested.
  std::map<std::string, double> bench;
  std::map<std::string, double>* bench_sink = _bench ? &bench : nullptr;

  if (_grad) {
    grad = arma::vec(_theta_alpha.n_elem);
    lmp = _logMargPost(_theta_alpha, &grad, nullptr, bench_sink);
  } else {
    lmp = _logMargPost(_theta_alpha, nullptr, nullptr, bench_sink);
  }

  if (_bench) {
    // Align the timing table on the longest step label.
    size_t width = 0;
    for (const auto& kv : bench)
      width = std::max(kv.first.size(), width);
    for (const auto& kv : bench)
      arma::cout << "| " << Bench::pad(kv.first, width, ' ') << " | " << kv.second << " |" << arma::endl;
  }

  return std::make_tuple(lmp, std::move(grad));
}
557

UNCOV
558
// Log-likelihood at the currently fitted hyper-parameters (theta, sigma2, nugget).
LIBKRIGING_EXPORT double NuggetKriging::logLikelihood() {
  const arma::uword d = m_theta.n_elem;
  arma::vec theta_alpha(d + 1);
  theta_alpha.head(d) = m_theta;
  // alpha = sigma2 / (sigma2 + nugget)
  theta_alpha.at(d) = m_sigma2 / (m_sigma2 + m_nugget);
  return std::get<0>(NuggetKriging::logLikelihoodFun(theta_alpha, false, false));
}
565

UNCOV
566
// Log marginal posterior at the currently fitted hyper-parameters.
LIBKRIGING_EXPORT double NuggetKriging::logMargPost() {
  const arma::uword d = m_theta.n_elem;
  arma::vec theta_alpha(d + 1);
  theta_alpha.head(d) = m_theta;
  // alpha = sigma2 / (sigma2 + nugget)
  theta_alpha.at(d) = m_sigma2 / (m_sigma2 + m_nugget);
  return std::get<0>(NuggetKriging::logMargPostFun(theta_alpha, false, false));
}
573

UNCOV
574
// Map (theta, alpha) to the optimizer's internal coordinates.
// The last component (alpha) is first mirrored via 1 + alpha_lower - alpha so
// that Optim::reparam_to can transform it like the length-scales.
std::function<arma::vec(const arma::vec&)> NuggetKriging::reparam_to = [](const arma::vec& theta_alpha) {
  const arma::uword last = theta_alpha.n_elem - 1;
  arma::vec mirrored = theta_alpha;
  mirrored.at(last) = 1 + alpha_lower - mirrored.at(last);
  return Optim::reparam_to(mirrored);
};
580

UNCOV
581
// Inverse of reparam_to: recover (theta, alpha) from the optimizer's internal
// coordinates, un-mirroring the last (alpha) component.
std::function<arma::vec(const arma::vec&)> NuggetKriging::reparam_from = [](const arma::vec& gamma) {
  arma::vec theta_alpha = Optim::reparam_from(gamma);
  const arma::uword last = theta_alpha.n_elem - 1;
  theta_alpha.at(last) = 1 + alpha_lower - theta_alpha.at(last);
  return theta_alpha;
};
587

588
std::function<arma::vec(const arma::vec&, const arma::vec&)> NuggetKriging::reparam_from_deriv
UNCOV
589
    = [](const arma::vec& _theta_alpha, const arma::vec& _grad) {
×
590
        arma::vec D_theta_alpha = arma::conv_to<arma::vec>::from(-_grad % _theta_alpha);
×
591
        const arma::uword d = D_theta_alpha.n_elem - 1;
×
592
        D_theta_alpha.at(d) = (1 + alpha_lower - _theta_alpha.at(d)) * _grad.at(d);
×
593
        return D_theta_alpha;
×
594
      };
595

596
// Admissible range for alpha = sigma2 / (sigma2 + nugget) during optimization;
// alpha_lower also appears in the reparam_to/reparam_from mirror transform.
double NuggetKriging::alpha_upper = 1.0;
double NuggetKriging::alpha_lower = 1E-3;
598

599
/** Fit the kriging object on (X,y):
 * @param y is n length column vector of output
 * @param X is n*d matrix of input
 * @param regmodel is the regression model to be used for the GP mean (choice between constant, linear, quadratic)
 * @param normalize is a boolean to enforce inputs/output normalization
 * @param optim is an optimizer name from OptimLib, or 'none' to keep parameters unchanged
 * @param objective is 'LL' (log-likelihood) or 'LMP' (log marginal posterior). Ignored if optim=='none'.
 * @param parameters starting values for hyper-parameters for optim, or final values if optim=='none'.
 */
LIBKRIGING_EXPORT void NuggetKriging::fit(const arma::vec& y,
                                          const arma::mat& X,
                                          const Trend::RegressionModel& regmodel,
                                          bool normalize,
                                          const std::string& optim,
                                          const std::string& objective,
                                          const Parameters& parameters) {
  const arma::uword n = X.n_rows;
  const arma::uword d = X.n_cols;

  // Objective to be *minimized* by L-BFGS-B: negated LL or LMP, evaluated on
  // the stacked vector [theta..., alpha], optionally in reparameterized
  // (gamma) coordinates. Wrapped in CacheFunction to avoid re-evaluations.
  std::function<double(const arma::vec& _gamma, arma::vec* grad_out, NuggetKriging::KModel* okm_data)> fit_ofn;
  m_optim = optim;
  m_objective = objective;
  if (objective.compare("LL") == 0) {
    if (Optim::reparametrize) {
      fit_ofn = CacheFunction([this](const arma::vec& _gamma, arma::vec* grad_out, NuggetKriging::KModel* okm_data) {
        // Change variable for opt: . -> 1/exp(.)
        // DEBUG: if (Optim::log_level>3) arma::cout << "> gamma: " << _gamma << arma::endl;
        const arma::vec _theta_alpha = NuggetKriging::reparam_from(_gamma);
        // DEBUG: if (Optim::log_level>3) arma::cout << "> theta_alpha: " << _theta_alpha << arma::endl;
        double ll = this->_logLikelihood(_theta_alpha, grad_out, okm_data, nullptr);
        // DEBUG: if (Optim::log_level>3) arma::cout << "  > -ll: " << -ll << arma::endl;
        if (grad_out != nullptr) {
          // Chain rule back to gamma coordinates, negated for minimization.
          *grad_out = -NuggetKriging::reparam_from_deriv(_theta_alpha, *grad_out);
          // DEBUG:
          // if (Optim::log_level>3) {
          //  arma::cout << "  > grad -ll: " << *grad_out << arma::endl;
          //  //// Check with numerical gradient:
          //  //for (size_t i = 0; i <_gamma.n_elem; i++) {
          //  //  arma::vec eps = arma::zeros(_gamma.n_elem);
          //  //  eps(i) = 0.000001;
          //  //  const arma::vec _theta_alpha_eps = reparam_from(_gamma + eps);
          //  //  double ll_eps = this->_logLikelihood(_theta_alpha_eps, nullptr, okm_data);
          //  //  arma::cout << "  > num_grad -ll: " << -(ll_eps - ll)/0.000001 << arma::endl;
          //  //}
          //}
        }
        return -ll;
      });
    } else {
      fit_ofn = CacheFunction([this](const arma::vec& _gamma, arma::vec* grad_out, NuggetKriging::KModel* okm_data) {
        const arma::vec _theta_alpha = _gamma;
        // DEBUG: if (Optim::log_level>3) arma::cout << "> theta_alpha: " << _theta_alpha << arma::endl;
        double ll = this->_logLikelihood(_theta_alpha, grad_out, okm_data, nullptr);
        // DEBUG: if (Optim::log_level>3) arma::cout << "  > ll: " << ll << arma::endl;
        if (grad_out != nullptr) {
          // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad ll: " << grad_out << arma::endl;
          *grad_out = -*grad_out;
        }
        return -ll;
      });
    }
  } else if (objective.compare("LMP") == 0) {
    // Our impl. of https://github.com/cran/RobustGaSP/blob/5cf21658e6a6e327be6779482b93dfee25d24592/R/rgasp.R#L303
    //@see Mengyang Gu, Xiao-jing Wang and Jim Berger, 2018, Annals of Statistics.
    if (Optim::reparametrize) {
      fit_ofn = CacheFunction([this](const arma::vec& _gamma, arma::vec* grad_out, NuggetKriging::KModel* okm_data) {
        // Change variable for opt: . -> 1/exp(.)
        // DEBUG: if (Optim::log_level>3) arma::cout << "> gamma: " << _gamma << arma::endl;
        const arma::vec _theta_alpha = NuggetKriging::reparam_from(_gamma);
        // DEBUG: if (Optim::log_level>3) arma::cout << "> theta_alpha: " << _theta_alpha << arma::endl;
        double lmp = this->_logMargPost(_theta_alpha, grad_out, okm_data, nullptr);
        // DEBUG: if (Optim::log_level>3) arma::cout << "  > lmp: " << lmp << arma::endl;
        if (grad_out != nullptr) {
          // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad lmp: " << grad_out << arma::endl;
          *grad_out = -NuggetKriging::reparam_from_deriv(_theta_alpha, *grad_out);
        }
        return -lmp;
      });
    } else {
      fit_ofn = CacheFunction([this](const arma::vec& _gamma, arma::vec* grad_out, NuggetKriging::KModel* okm_data) {
        const arma::vec _theta_alpha = _gamma;
        // DEBUG: if (Optim::log_level>3) arma::cout << "> theta_alpha: " << _theta_alpha << arma::endl;
        double lmp = this->_logMargPost(_theta_alpha, grad_out, okm_data, nullptr);
        // DEBUG: if (Optim::log_level>3) arma::cout << "  > lmp: " << lmp << arma::endl;
        if (grad_out != nullptr) {
          // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad lmp: " << grad_out << arma::endl;
          *grad_out = -*grad_out;
        }
        return -lmp;
      });
    }
  } else
    throw std::invalid_argument("Unsupported fit objective: " + objective + " (supported are: LL, LMP)");

  arma::rowvec centerX;
  arma::rowvec scaleX;
  double centerY;
  double scaleY;
  // Normalization of inputs and output: min/range scaling when requested,
  // identity (center 0, scale 1) otherwise.
  m_normalize = normalize;
  if (m_normalize) {
    centerX = min(X, 0);
    scaleX = max(X, 0) - min(X, 0);
    centerY = min(y);
    scaleY = max(y) - min(y);
  } else {
    centerX = arma::rowvec(d, arma::fill::zeros);
    scaleX = arma::rowvec(d, arma::fill::ones);
    centerY = 0;
    scaleY = 1;
  }
  m_centerX = centerX;
  m_scaleX = scaleX;
  m_centerY = centerY;
  m_scaleY = scaleY;
  {  // FIXME why copies of newX and newy
    arma::mat newX = X;
    newX.each_row() -= centerX;
    newX.each_row() /= scaleX;
    arma::vec newy = (y - centerY) / scaleY;
    this->m_X = newX;
    this->m_y = newy;
  }

  // Now we compute the distance matrix between points. Will be used to compute R(theta) later (e.g. when fitting)
  // Note: m_dX is transposed compared to m_X
  m_dX = arma::mat(d, n * n, arma::fill::zeros);
  for (arma::uword ij = 0; ij < m_dX.n_cols; ij++) {
    int i = (int)ij / n;
    int j = ij % n;  // i,j <-> i*n+j
    if (i < j) {
      // Fill both symmetric entries (i,j) and (j,i) in one pass.
      m_dX.col(ij) = trans(m_X.row(i) - m_X.row(j));
      m_dX.col(j * n + i) = m_dX.col(ij);
    }
  }
  m_maxdX = arma::max(arma::abs(m_dX), 1);

  // Define regression matrix
  m_regmodel = regmodel;
  m_F = Trend::regressionModelMatrix(regmodel, m_X);
  m_est_beta = parameters.is_beta_estim && (m_regmodel != Trend::RegressionModel::None);
  if (!m_est_beta && parameters.beta.has_value()
      && parameters.beta.value().n_elem > 0) {  // Then force beta to be fixed (not estimated, no variance)
    m_est_beta = false;
    m_beta = parameters.beta.value();
    if (m_normalize)
      m_beta /= scaleY;
  } else
    m_est_beta = true;

  // Starting values for theta (one row per start point); transposed and
  // rescaled if needed so columns always match the input dimension d.
  arma::mat theta0;
  if (parameters.theta.has_value()) {
    theta0 = parameters.theta.value();
    if ((theta0.n_cols != d) && (theta0.n_rows == d))
      theta0 = theta0.t();
    if (m_normalize)
      theta0.each_row() /= scaleX;
    if (theta0.n_cols != d)
      throw std::runtime_error("Dimension of theta should be nx" + std::to_string(d) + " instead of "
                               + std::to_string(theta0.n_rows) + "x" + std::to_string(theta0.n_cols));
  }

  if (optim == "none") {  // just keep given theta, no optimisation of ll (but estim beta still possible)
    if (!parameters.theta.has_value())
      throw std::runtime_error("Theta should be given (1x" + std::to_string(d) + ") matrix, when optim=none");
    if (!parameters.nugget.has_value())
      throw std::runtime_error("Nugget should be given, when optim=none");
    if (!parameters.sigma2.has_value())
      throw std::runtime_error("Sigma2 should be given, when optim=none");

    m_theta = trans(theta0.row(0));
    m_est_theta = false;

    double sigma2 = -1;
    m_est_sigma2 = parameters.is_sigma2_estim;
    if (parameters.sigma2.has_value()) {
      sigma2 = parameters.sigma2.value()[0];  // otherwise sigma2 will be re-calculated using given theta
      if (m_normalize)
        sigma2 /= (scaleY * scaleY);
    } else
      m_est_sigma2 = true;
    double nugget = 0;
    m_est_nugget = parameters.is_nugget_estim;
    if (parameters.nugget.has_value()) {
      nugget = parameters.nugget.value()[0];
      if (m_normalize)
        nugget /= (scaleY * scaleY);
    } else
      m_est_nugget = true;

    // Build the model once at the given [theta, alpha] and store its factors.
    NuggetKriging::KModel m = make_Model(m_theta, sigma2 / (nugget + sigma2), nullptr);
    m_is_empty = false;
    m_T = std::move(m.L);
    m_R = std::move(m.R);
    m_M = std::move(m.Fstar);
    m_circ = std::move(m.Rstar);
    m_star = std::move(m.Qstar);
    if (m_est_beta) {
      m_beta = std::move(m.betahat);
      m_z = std::move(m.Estar);
    } else {
      // m_beta = parameters.beta.value(); already done above
      m_z = std::move(m.ystar) - m_M * m_beta;
    }
    if (m_est_sigma2) {
      m_sigma2 = m.SSEstar / n;
    } else {
      m_sigma2 = sigma2;
    }
    if (m_est_nugget) {
      // NOTE(review): with optim=none and is_nugget_estim, the given nugget
      // is ignored and reset to 0 — presumably intentional; confirm.
      m_nugget = 0;
    } else {
      m_nugget = nugget;
    }

  } else if (optim.rfind("BFGS", 0) == 0) {
    Random::init();

    // Bounds on theta, possibly tightened by the variogram heuristic below.
    arma::vec theta_lower = Optim::theta_lower_factor * m_maxdX;
    arma::vec theta_upper = Optim::theta_upper_factor * m_maxdX;

    if (Optim::variogram_bounds_heuristic) {
      arma::vec dy2 = arma::vec(n * n, arma::fill::zeros);
      for (arma::uword ij = 0; ij < dy2.n_elem; ij++) {
        int i = (int)ij / n;
        int j = ij % n;  // i,j <-> i*n+j
        if (i < j) {
          dy2[ij] = m_y.at(i) - m_y.at(j);
          dy2[ij] *= dy2[ij];
          dy2[j * n + i] = dy2[ij];
        }
      }
      // dy2 /= arma::var(m_y);
      arma::vec dy2dX2_slope = dy2 / arma::sum(m_dX % m_dX, 0).t();
      // arma::cout << "dy2dX_slope:" << dy2dX_slope << arma::endl;
      dy2dX2_slope.replace(arma::datum::nan, 0.0);  // we are not interested in same points where dX=0, and dy=0
      arma::vec w = dy2dX2_slope / sum(dy2dX2_slope);
      arma::mat steepest_dX_mean = arma::abs(m_dX) * w;
      // arma::cout << "steepest_dX_mean:" << steepest_dX_mean << arma::endl;

      theta_lower = arma::max(theta_lower, Optim::theta_lower_factor * steepest_dX_mean);
      // no, only relevant for inf bound: theta_upper = arma::min(theta_upper, Optim::theta_upper_factor *
      // steepest_dX_mean);
      theta_lower = arma::min(theta_lower, theta_upper);
      theta_upper = arma::max(theta_lower, theta_upper);
    }
    // arma::cout << "theta_lower:" << theta_lower << arma::endl;
    // arma::cout << "theta_upper:" << theta_upper << arma::endl;

    // Number of start points, parsed from the optimizer name: "BFGS10" -> 10.
    int multistart = 1;
    try {
      multistart = std::stoi(optim.substr(4));
    } catch (std::invalid_argument&) {
      // let multistart = 1
    }

    arma::mat theta0_rand
        = arma::repmat(trans(theta_lower), multistart, 1)
          + Random::randu_mat(multistart, d) % arma::repmat(trans(theta_upper - theta_lower), multistart, 1);
    // theta0 = arma::abs(0.5 + Random::randn_mat(multistart, d) / 6.0)
    //          % arma::repmat(max(m_X, 0) - min(m_X, 0), multistart, 1);

    if (parameters.theta.has_value()) {  // just use given theta(s) as starting values for multi-bfgs
      multistart = std::max(multistart, (int)theta0.n_rows);
      theta0 = arma::join_cols(theta0, theta0_rand);  // append random starting points to given ones
      theta0.resize(multistart, theta0.n_cols);       // keep only multistart first rows
    } else {
      theta0 = theta0_rand;
    }
    // arma::cout << "theta0:" << theta0 << arma::endl;

    // Starting values for alpha: derived from given sigma2/nugget pairs when
    // valid, drawn at random (biased towards alpha_upper) otherwise.
    arma::vec alpha0;
    if (parameters.sigma2.has_value() && parameters.nugget.has_value()) {
      alpha0 = arma::vec(parameters.sigma2.value().n_elem * parameters.nugget.value().n_elem);
      for (size_t i = 0; i < parameters.sigma2.value().n_elem; i++) {
        for (size_t j = 0; j < parameters.nugget.value().n_elem; j++) {
          if ((parameters.sigma2.value()[i] < 0) || (parameters.nugget.value()[j] < 0)
              || (parameters.sigma2.value()[i] + parameters.nugget.value()[j] < 0))
            alpha0[i + j * parameters.sigma2.value().n_elem]
                = alpha_lower + (alpha_upper - alpha_lower) * (1 - std::pow(Random::randu(), 3.0));
          else
            alpha0[i + j * parameters.sigma2.value().n_elem]
                = parameters.sigma2.value()[i] / (parameters.sigma2.value()[i] + parameters.nugget.value()[j]);
        }
      }
    } else {
      alpha0 = alpha_lower + (alpha_upper - alpha_lower) * (1 - arma::pow(Random::randu_vec(theta0.n_rows), 3.0));
    }
    // arma::cout << "alpha0:" << alpha0 << arma::endl;

    // Stacked optimization bounds on gamma = [theta..., alpha]; under
    // reparameterization the transform is decreasing, so bounds swap.
    arma::vec gamma_lower = arma::vec(d + 1);
    gamma_lower.head(d) = theta_lower;
    gamma_lower.at(d) = alpha_lower;
    arma::vec gamma_upper = arma::vec(d + 1);
    gamma_upper.head(d) = theta_upper;
    gamma_upper.at(d) = alpha_upper;
    if (Optim::reparametrize) {
      arma::vec gamma_lower_tmp = gamma_lower;
      gamma_lower = NuggetKriging::reparam_to(gamma_upper);
      gamma_upper = NuggetKriging::reparam_to(gamma_lower_tmp);
      double gamma_lower_at_d = gamma_lower.at(d);
      gamma_lower.at(d) = gamma_upper.at(d);
      gamma_upper.at(d) = gamma_lower_at_d;
    }

    double min_ofn = std::numeric_limits<double>::infinity();

    // Multistart L-BFGS-B: optimize from each start point (with restarts on
    // abnormal termination or premature convergence to a bound), then keep
    // the overall best solution.
    for (arma::uword i = 0; i < multistart; i++) {
      arma::vec gamma_tmp = arma::vec(d + 1);
      gamma_tmp.head(d) = theta0.row(i % multistart).t();
      gamma_tmp.at(d) = alpha0[i % alpha0.n_elem];
      if (Optim::reparametrize) {
        gamma_tmp = NuggetKriging::reparam_to(gamma_tmp);
      }

      // Widen bounds if the start point falls outside them.
      gamma_lower = arma::min(gamma_tmp, gamma_lower);
      gamma_upper = arma::max(gamma_tmp, gamma_upper);

      if (Optim::log_level > 0) {
        arma::cout << "BFGS:" << arma::endl;
        arma::cout << "  max iterations: " << Optim::max_iteration << arma::endl;
        arma::cout << "  null gradient tolerance: " << Optim::gradient_tolerance << arma::endl;
        arma::cout << "  constant objective tolerance: " << Optim::objective_rel_tolerance << arma::endl;
        arma::cout << "  reparametrize: " << Optim::reparametrize << arma::endl;
        arma::cout << "  normalize: " << m_normalize << arma::endl;
        arma::cout << "  lower_bounds: " << theta_lower.t() << "";
        arma::cout << "                " << alpha_lower << arma::endl;
        arma::cout << "  upper_bounds: " << theta_upper.t() << "";
        arma::cout << "                " << alpha_upper << arma::endl;
        arma::cout << "  start_point: " << theta0.row(i % multistart) << "";
        arma::cout << "               " << alpha0[i % alpha0.n_elem] << arma::endl;
      }

      m_est_sigma2 = parameters.is_sigma2_estim;
      if ((!m_est_sigma2) && (parameters.sigma2.has_value())) {
        m_sigma2 = parameters.sigma2.value()[0];
        if (m_normalize)
          m_sigma2 /= (scaleY * scaleY);
      } else {
        m_est_sigma2 = true;  // force estim if no value given
      }
      m_est_nugget = parameters.is_nugget_estim;
      if ((!m_est_nugget) && (parameters.nugget.has_value())) {
        m_nugget = parameters.nugget.value()[0];
        if (m_normalize)
          m_nugget /= (scaleY * scaleY);
      } else {
        m_est_nugget = true;  // force estim if no value given
      }

      lbfgsb::Optimizer optimizer{d + 1};
      optimizer.iprint = Optim::log_level - 2;
      optimizer.max_iter = Optim::max_iteration;
      optimizer.pgtol = Optim::gradient_tolerance;
      optimizer.factr = Optim::objective_rel_tolerance / 1E-13;
      arma::ivec bounds_type{d + 1, arma::fill::value(2)};  // means both upper & lower bounds

      int retry = 0;
      double best_f_opt = std::numeric_limits<double>::infinity();
      arma::vec best_gamma = gamma_tmp;
      NuggetKriging::KModel m = make_Model(theta0.row(i % multistart).t(), alpha0[i % alpha0.n_elem], nullptr);

      while (retry <= Optim::max_restart) {
        arma::vec gamma_0 = gamma_tmp;
        auto result = optimizer.minimize(
            [&m, &fit_ofn](const arma::vec& vals_inp, arma::vec& grad_out) -> double {
              return fit_ofn(vals_inp, &grad_out, &m);
            },
            gamma_tmp,
            gamma_lower.memptr(),
            gamma_upper.memptr(),
            bounds_type.memptr());

        if (Optim::log_level > 0) {
          arma::cout << "     iterations: " << result.num_iters << arma::endl;
          arma::cout << "     status: " << result.task << arma::endl;
          if (Optim::reparametrize) {
            arma::cout << "     start_point: " << NuggetKriging::reparam_from(gamma_0).t() << " ";
            arma::cout << "     solution: " << NuggetKriging::reparam_from(gamma_tmp).t() << " ";
          } else {
            arma::cout << "     start_point: " << gamma_0.t() << " ";
            arma::cout << "     solution: " << gamma_tmp.t() << " ";
          }
        }

        if (result.f_opt < best_f_opt) {
          best_f_opt = result.f_opt;
          best_gamma = gamma_tmp;
        }

        // Distance of the solution to the closest bound, used to detect a
        // degenerate "stuck on the bound" convergence worth restarting.
        double sol_to_lb_theta = arma::min(arma::abs(gamma_tmp.head(d) - gamma_lower.head(d)));
        double sol_to_ub_theta = arma::min(arma::abs(gamma_tmp.head(d) - gamma_upper.head(d)));
        double sol_to_b_theta = std::min(sol_to_ub_theta, sol_to_lb_theta);
        //= Optim::reparametrize ? sol_to_ub_theta : sol_to_lb_theta;         // just consider theta lower bound
        double sol_to_lb_alpha = std::abs(gamma_tmp.at(d) - gamma_lower.at(d));
        double sol_to_ub_alpha = std::abs(gamma_tmp.at(d) - gamma_upper.at(d));
        double sol_to_b_alpha = std::min(sol_to_ub_alpha, sol_to_lb_alpha);
        //= Optim::reparametrize ? sol_to_ub_alpha : sol_to_lb_alpha;         // just consider alpha lower bound
        // Optim::reparametrize
        //    ? std::abs(gamma_tmp.at(d) - gamma_upper.at(d))
        //    : std::abs(gamma_tmp.at(d) - gamma_lower.at(d));
        double sol_to_b = sol_to_b_theta < sol_to_b_alpha ? sol_to_b_theta : sol_to_b_alpha;
        if ((retry < Optim::max_restart)  //&& (result.num_iters <= 2 * d)
            && ((result.task.rfind("ABNORMAL_TERMINATION_IN_LNSRCH", 0) == 0)
                || ((sol_to_b < arma::datum::eps) && (result.num_iters <= 2))  // we fastly converged to one bound
                || (result.f_opt > best_f_opt))) {
          gamma_tmp.head(d)
              = (theta0.row(i % multistart).t() + theta_lower)
                / pow(2.0, retry + 1);  // so, re-use previous starting point and change it to middle-point
          gamma_tmp.at(d) = alpha_upper - (alpha0[i % alpha0.n_elem] + alpha_upper) / pow(2.0, retry + 1);

          if (Optim::reparametrize)
            gamma_tmp = NuggetKriging::reparam_to(gamma_tmp);

          gamma_lower = arma::min(gamma_tmp, gamma_lower);
          gamma_upper = arma::max(gamma_tmp, gamma_upper);

          retry++;
        } else {
          if (Optim::log_level > 1) {
            result.print();
          }
          break;
        }
      }

      // last call to ensure that T and z are up-to-date with solution.
      double min_ofn_tmp = fit_ofn(best_gamma, nullptr, &m);

      if (Optim::log_level > 0) {
        arma::cout << "  best objective: " << min_ofn_tmp << arma::endl;
        if (Optim::reparametrize) {
          arma::cout << "  best solution: " << NuggetKriging::reparam_from(best_gamma).t() << " ";
        } else {
          arma::cout << "  best solution: " << best_gamma.t() << " ";
        }
      }

      if (min_ofn_tmp < min_ofn) {
        arma::vec best_theta_alpha = best_gamma;
        if (Optim::reparametrize)
          best_theta_alpha = NuggetKriging::reparam_from(best_gamma);
        m_theta = best_theta_alpha.head(d);
        m_est_theta = true;
        min_ofn = min_ofn_tmp;

        // Commit the best model's factors as the fitted state.
        m_is_empty = false;
        m_T = std::move(m.L);
        m_R = std::move(m.R);
        m_M = std::move(m.Fstar);
        m_circ = std::move(m.Rstar);
        m_star = std::move(m.Qstar);
        if (m_est_beta) {
          m_beta = std::move(m.betahat);
          m_z = std::move(m.Estar);
        } else {
          // m_beta = parameters.beta.value(); already done above
          m_z = std::move(m.ystar) - m_M * m_beta;
        }
        // Recover sigma2 and nugget from the optimized alpha, depending on
        // which of the two was asked to be estimated.
        double m_alpha = best_theta_alpha.at(d);
        if (m_est_sigma2) {
          if (m_est_nugget) {
            m_sigma2 = m_alpha * as_scalar(LinearAlgebra::crossprod(m_z)) / n;
            if (m_objective.compare("LMP") == 0) {
              m_sigma2 = m_sigma2 * n / (n - m_F.n_cols - 2);
            }
            m_nugget = m_sigma2 / m_alpha - m_sigma2;
          } else {
            m_sigma2 = m_nugget * m_alpha / (1 - m_alpha);
          }
        } else {
          if (m_est_nugget) {
            m_nugget = m_sigma2 * (1 - m_alpha) / m_alpha;
          } else {
            // don't care about alpha
          }
        }
      }
    }
  } else
    throw std::runtime_error("Unsupported optim: " + optim + " (supported are: none, BFGS[#])");

  // arma::cout << "theta:" << m_theta << arma::endl;
}
×
1083

1084
/** Compute the prediction for given points X'
1085
 * @param X_n is n_n*d matrix of points where to predict output
1086
 * @param return_stdev is true if return also stdev column vector
1087
 * @param return_cov is true if return also cov matrix between X_n
1088
 * @param return_deriv is true if return also derivative of prediction wrt x
1089
 * @return output prediction: n_n means, [n_n standard deviations], [n_n*n_n full covariance matrix]
1090
 */
1091
LIBKRIGING_EXPORT std::tuple<arma::vec, arma::vec, arma::mat, arma::mat, arma::mat>
UNCOV
1092
NuggetKriging::predict(const arma::mat& X_n, bool return_stdev, bool return_cov, bool return_deriv) {
×
1093
  arma::uword n_n = X_n.n_rows;
×
1094
  arma::uword n_o = m_X.n_rows;
×
1095
  arma::uword d = m_X.n_cols;
×
1096
  if (X_n.n_cols != d)
×
1097
    throw std::runtime_error("Predict locations have wrong dimension: " + std::to_string(X_n.n_cols) + " instead of "
×
1098
                             + std::to_string(d));
×
1099

UNCOV
1100
  arma::vec yhat_n(n_n);
×
1101
  arma::vec ysd2_n = arma::vec(n_n, arma::fill::zeros);
×
1102
  arma::mat Sigma_n = arma::mat(n_n, n_n, arma::fill::zeros);
×
1103
  arma::mat Dyhat_n = arma::mat(n_n, d, arma::fill::zeros);
×
1104
  arma::mat Dysd2_n = arma::mat(n_n, d, arma::fill::zeros);
×
1105

UNCOV
1106
  arma::mat Xn_o = trans(m_X);  // already normalized if needed
×
1107
  arma::mat Xn_n = X_n;
×
1108
  // Normalize X_n
UNCOV
1109
  Xn_n.each_row() -= m_centerX;
×
1110
  Xn_n.each_row() /= m_scaleX;
×
1111

UNCOV
1112
  double sigma2 = m_sigma2 * (m_objective.compare("LMP") == 0 ? (n_o - m_F.n_cols) / (n_o - m_F.n_cols - 2) : 1.0);
×
1113

UNCOV
1114
  arma::mat F_n = Trend::regressionModelMatrix(m_regmodel, Xn_n);
×
1115
  Xn_n = trans(Xn_n);
×
1116

UNCOV
1117
  double m_alpha = m_sigma2 / (m_sigma2 + m_nugget);
×
1118

UNCOV
1119
  auto t0 = Bench::tic();
×
1120
  arma::mat R_on = arma::mat(n_o, n_n, arma::fill::none);
×
1121
  for (arma::uword i = 0; i < n_o; i++) {
×
1122
    for (arma::uword j = 0; j < n_n; j++) {
×
1123
      arma::vec dij = Xn_o.col(i) - Xn_n.col(j);
×
1124
      if (dij.is_zero(arma::datum::eps))
×
1125
        R_on.at(i, j) = 1.0;
×
1126
      else
UNCOV
1127
        R_on.at(i, j) = _Cov(dij, m_theta) * m_alpha;
×
1128
    }
×
1129
  }
UNCOV
1130
  t0 = Bench::toc(nullptr, "R_on       ", t0);
×
1131

UNCOV
1132
  arma::mat Rstar_on = LinearAlgebra::solve(m_T, R_on);
×
1133
  t0 = Bench::toc(nullptr, "Rstar_on   ", t0);
×
1134

UNCOV
1135
  yhat_n = F_n * m_beta + trans(Rstar_on) * m_z;
×
1136
  t0 = Bench::toc(nullptr, "yhat_n     ", t0);
×
1137

1138
  // Un-normalize predictor
UNCOV
1139
  yhat_n = m_centerY + m_scaleY * yhat_n;
×
1140

UNCOV
1141
  arma::mat Fhat_n = trans(Rstar_on) * m_M;
×
1142
  arma::mat E_n = F_n - Fhat_n;
×
1143
  arma::mat Ecirc_n = LinearAlgebra::rsolve(m_circ, E_n);
×
1144
  t0 = Bench::toc(nullptr, "Ecirc_n    ", t0);
×
1145

UNCOV
1146
  if (return_stdev) {
×
1147
    ysd2_n = 1.0 - sum(Rstar_on % Rstar_on, 0).as_col() + sum(Ecirc_n % Ecirc_n, 1).as_col();
×
1148
    ysd2_n.transform([](double val) { return (std::isnan(val) || val < 0 ? 0.0 : val); });
×
1149
    ysd2_n *= sigma2 / m_alpha * m_scaleY * m_scaleY;
×
1150
    t0 = Bench::toc(nullptr, "ysd2_n     ", t0);
×
1151
  }
1152

UNCOV
1153
  if (return_cov) {
×
1154
    // Compute the covariance matrix between new data points
UNCOV
1155
    arma::mat R_nn = arma::mat(n_n, n_n, arma::fill::none);
×
1156
    for (arma::uword i = 0; i < n_n; i++) {
×
1157
      // R_nn.at(i, i) = 1;
UNCOV
1158
      for (arma::uword j = 0; j < i; j++) {
×
1159
        R_nn.at(i, j) = R_nn.at(j, i) = _Cov((Xn_n.col(i) - Xn_n.col(j)), m_theta);
×
1160
      }
1161
    }
UNCOV
1162
    R_nn *= m_alpha;
×
1163
    R_nn.diag().ones();
×
1164
    t0 = Bench::toc(nullptr, "R_nn       ", t0);
×
1165

UNCOV
1166
    Sigma_n = R_nn - trans(Rstar_on) * Rstar_on + Ecirc_n * trans(Ecirc_n);
×
1167
    Sigma_n *= sigma2 / m_alpha * m_scaleY * m_scaleY;
×
1168
    t0 = Bench::toc(nullptr, "Sigma_n    ", t0);
×
1169
  }
×
1170

UNCOV
1171
  if (return_deriv) {
×
1172
    //// https://github.com/libKriging/dolka/blob/bb1dbf0656117756165bdcff0bf5e0a1f963fbef/R/kmStuff.R#L322C1-L363C10
1173
    // for (i in 1:n_n) {
1174
    //
1175
    //   ## =================================================================
1176
    //   ## 'DF_n_i' and 'DR_on_i' are matrices with
1177
    //   ## dimension c(n_n, d)
1178
    //   ## =================================================================
1179
    //
1180
    //   DF_n_i <- trend.deltax(x = XNew[i, ], model = object)
1181
    //   KOldNewi <- as.vector(KOldNew[ , i])
1182
    //   DR_on_i <- covVector.dx(x = as.vector(XNew[i, ]),
1183
    //                               X = X,
1184
    //                               object = object@covariance,
1185
    //                               c = KOldNewi)
1186
    //
1187
    //   KOldNewStarDer[ , i, i, ] <-
1188
    //       backsolve(L, DR_on_i, upper.tri = FALSE)
1189
    //
1190
    //   ## Gradient of the kriging trend and mean
1191
    //   muNewHatDer[i, i, ] <- crossprod(DF_n_i, betaHat)
1192
    //   ## dim in product c(d, n) and NULL(length d)
1193
    //   mean_nHatDer[i, i, ] <- muNewHatDer[i, i, ] +
1194
    //       crossprod(KOldNewStarDer[ , i, i,  ], zStar)
1195
    //   ## dim in product c(d, n) and NULL(length n)
1196
    //   s2Der[i, i,  ] <-
1197
    //       - 2 * crossprod(KOldNewStarDer[ , i, i, ],
1198
    //                       drop(KOldNewStar[ , i]))
1199
    //
1200
    //   ## dim in product c(d, n) and c(n, p)
1201
    //
1202
    //   if (type == "UK") {
1203
    //       ENewDagDer[i, i, , ] <-
1204
    //           backsolve(t(RStar),
1205
    //                     DF_n_i -
1206
    //                     t(crossprod(KOldNewStarDer[ , i, i, ], FStar)),
1207
    //                     upper.tri = FALSE)
1208
    //       ## dim in product NULL (length p) and c(p, d) because of 'drop'
1209
    //       s2Der[i, i, ] <- s2Der[i, i, ] + 2 * drop(ENewDagT[ , i]) %*%
1210
    //           drop(ENewDagDer[i, i, , ])
1211
    //   }
1212
    //  numerical derivative step : value is sensitive only for non linear trends. Otherwise, it gives exact results.
UNCOV
1213
    const double h = 1.0E-5;
×
1214
    arma::mat h_eye_d = h * arma::mat(d, d, arma::fill::eye);
×
1215

1216
    // Compute the derivatives of the covariance and trend functions
UNCOV
1217
    for (arma::uword i = 0; i < n_n; i++) {  // for each predict point... should be parallel ?
×
1218
      arma::mat DR_on_i = arma::mat(n_o, d, arma::fill::none);
×
1219
      for (arma::uword j = 0; j < n_o; j++) {
×
1220
        DR_on_i.row(j) = R_on.at(j, i) * trans(_DlnCovDx(Xn_n.col(i) - Xn_o.col(j), m_theta));
×
1221
      }
UNCOV
1222
      t0 = Bench::toc(nullptr, "DR_on_i    ", t0);
×
1223

1224
      arma::mat tXn_n_repd_i
UNCOV
1225
          = arma::trans(Xn_n.col(i) * arma::mat(1, d, arma::fill::ones));  // just duplicate X_n.row(i) d times
×
1226
      arma::mat DF_n_i = (Trend::regressionModelMatrix(m_regmodel, tXn_n_repd_i + h_eye_d)
×
1227
                          - Trend::regressionModelMatrix(m_regmodel, tXn_n_repd_i - h_eye_d))
×
1228
                         / (2 * h);
×
1229
      t0 = Bench::toc(nullptr, "DF_n_i     ", t0);
×
1230

UNCOV
1231
      arma::mat W_i = LinearAlgebra::solve(m_T, DR_on_i);
×
1232
      t0 = Bench::toc(nullptr, "W_i        ", t0);
×
1233
      Dyhat_n.row(i) = trans(DF_n_i * m_beta + trans(W_i) * m_z);
×
1234
      t0 = Bench::toc(nullptr, "Dyhat_n    ", t0);
×
1235

UNCOV
1236
      if (return_stdev) {
×
1237
        arma::mat DEcirc_n_i = LinearAlgebra::solve(m_circ.t(), trans(DF_n_i - W_i.t() * m_M));
×
1238
        Dysd2_n.row(i) = -2 * Rstar_on.col(i).t() * W_i + 2 * Ecirc_n.row(i) * DEcirc_n_i;
×
1239
        t0 = Bench::toc(nullptr, "Dysd2_n    ", t0);
×
1240
      }
×
1241
    }
×
1242
    Dyhat_n *= m_scaleY;
×
1243
    Dysd2_n *= sigma2 / m_alpha * m_scaleY * m_scaleY;
×
1244
  }
×
1245

UNCOV
1246
  return std::make_tuple(std::move(yhat_n),
×
1247
                         std::move(arma::sqrt(ysd2_n)),
×
1248
                         std::move(Sigma_n),
×
1249
                         std::move(Dyhat_n),
×
1250
                         std::move(Dysd2_n / (2 * arma::sqrt(ysd2_n) * arma::mat(1, d, arma::fill::ones))));
×
1251
  /*if (return_stdev)
1252
    if (return_cov)
1253
      return std::make_tuple(std::move(yhat_n), std::move(pred_stdev), std::move(pred_cov));
1254
    else
1255
      return std::make_tuple(std::move(yhat_n), std::move(pred_stdev), nullptr);
1256
  else if (return_cov)
1257
    return std::make_tuple(std::move(yhat_n), std::move(pred_cov), nullptr);
1258
  else
1259
    return std::make_tuple(std::move(yhat_n), nullptr, nullptr);*/
UNCOV
1260
}
×
1261

1262
/** Draw sample trajectories of kriging at given points X'
 * @param nsim is number of simulations to draw
 * @param seed is seed for random number generator
 * @param X_n is n_n*d matrix of points where to simulate output
 * @param with_nugget is true if we want to include nugget effect in simulations
 * @param will_update is true if we want to keep simulations data for future update
 * @return output is n_n*nsim matrix of simulations at X_n
 */
UNCOV
1270
LIBKRIGING_EXPORT arma::mat NuggetKriging::simulate(const int nsim,
                                                    const int seed,
                                                    const arma::mat& X_n,
                                                    const bool with_nugget,
                                                    const bool will_update) {
  arma::uword n_n = X_n.n_rows;  // number of simulation points
  arma::uword n_o = m_X.n_rows;  // number of conditioning (design) points
  arma::uword d = m_X.n_cols;    // input dimension
  if (X_n.n_cols != d)
    throw std::runtime_error("Simulate locations have wrong dimension: " + std::to_string(X_n.n_cols) + " instead of "
                             + std::to_string(d));

  arma::mat Xn_o = trans(m_X);  // already normalized if needed
  arma::mat Xn_n = X_n;
  // Normalize X_n with the same centering/scaling used at fit time
  Xn_n.each_row() -= m_centerX;
  Xn_n.each_row() /= m_scaleX;

  // Define regression matrix (trend basis evaluated at the simulation points)
  arma::mat F_n = Trend::regressionModelMatrix(m_regmodel, Xn_n);

  Xn_n = trans(Xn_n);  // store points column-wise (d x n_n), same layout as Xn_o

  // NOTE(review): local variable with a member-style name (shadows any member of the
  // same name). alpha = sigma2 / (sigma2 + nugget) is the correlation fraction of the
  // total variance.
  double m_alpha = m_sigma2 / (m_sigma2 + m_nugget);

  auto t0 = Bench::tic();
  // Compute covariance between new data (symmetric, only lower triangle computed)
  arma::mat R_nn = arma::mat(n_n, n_n, arma::fill::none);
  for (arma::uword i = 0; i < n_n; i++) {
    // R_nn.at(i, i) = 1.0;
    for (arma::uword j = 0; j < i; j++) {
      R_nn.at(i, j) = R_nn.at(j, i) = _Cov((Xn_n.col(i) - Xn_n.col(j)), m_theta);
    }
  }
  R_nn.diag().ones();  // replaces R_nn.at(i, i) = 1.0;
  R_nn *= m_alpha;     // scale correlation part by alpha = sigma2/(sigma2+nugget)
  if (with_nugget)
    R_nn.diag().ones();  // increase diag to have nugget: diag becomes alpha + (1-alpha)
  t0 = Bench::toc(nullptr, "R_nn          ", t0);

  // Compute covariance between training data and new data to predict
  arma::mat R_on = arma::mat(n_o, n_n, arma::fill::none);
  for (arma::uword i = 0; i < n_o; i++) {
    for (arma::uword j = 0; j < n_n; j++) {
      arma::mat dij = Xn_o.col(i) - Xn_n.col(j);
      // A simulation point coinciding with a design point shares its nugget,
      // so the full (normalized) covariance 1 is used instead of alpha*Cov.
      if (with_nugget && dij.is_zero(arma::datum::eps))
        R_on.at(i, j) = 1.0;
      else
        R_on.at(i, j) = _Cov(dij, m_theta) * m_alpha;  // force m_alpha here, to be consistent with R_oo (=T*t(T))
      // R_on.at(i, j) = _Cov(Xn_o.col(i) - Xn_n.col(j), m_theta);
    }
  }
  // R_on *= alpha;
  t0 = Bench::toc(nullptr, "R_on       ", t0);

  // Rstar_on = T^-1 * R_on, with T the Cholesky factor of the training covariance
  arma::mat Rstar_on = LinearAlgebra::solve(m_T, R_on);
  t0 = Bench::toc(nullptr, "Rstar_on   ", t0);

  // Conditional mean at the simulation points: trend + kriging correction
  arma::vec yhat_n = F_n * m_beta + trans(Rstar_on) * m_z;
  t0 = Bench::toc(nullptr, "yhat_n        ", t0);

  // Trend-error term (accounts for the estimation of beta)
  arma::mat Fhat_n = trans(Rstar_on) * m_M;
  arma::mat E_n = F_n - Fhat_n;
  arma::mat Ecirc_n = LinearAlgebra::rsolve(m_circ, E_n);
  t0 = Bench::toc(nullptr, "Ecirc_n       ", t0);

  // Conditional covariance without trend uncertainty ('K' reads as '|': n given o)
  arma::mat SigmaNoTrend_nKo = R_nn - trans(Rstar_on) * Rstar_on;

  // Full conditional covariance, including trend (beta) uncertainty
  arma::mat Sigma_nKo = SigmaNoTrend_nKo + Ecirc_n * trans(Ecirc_n);
  t0 = Bench::toc(nullptr, "Sigma_nKo     ", t0);

  // Cholesky factor of the de-normalized (divided by alpha) conditional covariance
  arma::mat LSigma_nKo = LinearAlgebra::safe_chol_lower(Sigma_nKo / m_alpha);
  t0 = Bench::toc(nullptr, "LSigma_nKo     ", t0);

  // Draw nsim correlated Gaussian trajectories around the conditional mean
  arma::mat y_n = arma::mat(n_n, nsim, arma::fill::none);
  y_n.each_col() = yhat_n;
  Random::reset_seed(seed);
  y_n += LSigma_nKo * Random::randn_mat(n_n, nsim) * std::sqrt(m_sigma2);

  // Un-normalize simulations
  y_n = m_centerY + m_scaleY * y_n;

  if (will_update) {
    // Cache everything update_simulate() needs; stored simulations are un-normalized.
    lastsimup_Xn_u.clear();  // force reset to force update_simulate consider new data
    lastsim_y_n = y_n;

    lastsim_Xn_n = Xn_n;
    lastsim_seed = seed;
    lastsim_nsim = nsim;
    lastsim_with_nugget = with_nugget;

    lastsim_R_nn = R_nn;
    lastsim_F_n = F_n;

    // Block-Cholesky factor of the joint (o,n) covariance:
    // L_on = [ T        0     ]
    //        [ Rstar'   L_nCn ]
    lastsim_L_oCn = Rstar_on;
    lastsim_L_nCn = LinearAlgebra::safe_chol_lower(SigmaNoTrend_nKo);
    t0 = Bench::toc(nullptr, "L_nCn     ", t0);

    lastsim_L_on = arma::join_rows(arma::join_cols(m_T, lastsim_L_oCn.t()),
                                   arma::join_cols(arma::zeros(n_o, n_n), lastsim_L_nCn));

    // Inverse of the joint covariance via its Cholesky inverse
    arma::mat Linv_on = LinearAlgebra::solve(lastsim_L_on, arma::mat(n_o + n_n, n_o + n_n, arma::fill::eye));
    t0 = Bench::toc(nullptr, "Linv_on     ", t0);
    lastsim_Rinv_on = Linv_on.t() * Linv_on;
    t0 = Bench::toc(nullptr, "Rinv_on     ", t0);

    // Joint trend matrix and its QR-based whitened/"circ" versions
    lastsim_F_on = arma::join_cols(m_F, lastsim_F_n);
    lastsim_Fstar_on = LinearAlgebra::solve(lastsim_L_on, lastsim_F_on);
    t0 = Bench::toc(nullptr, "Fstar_on     ", t0);
    arma::mat Q_Fstar_on;
    arma::qr(Q_Fstar_on, lastsim_circ_on, lastsim_Fstar_on);
    lastsim_Fcirc_on = LinearAlgebra::rsolve(lastsim_circ_on, lastsim_F_on);
    t0 = Bench::toc(nullptr, "Fcirc_on     ", t0);

    // Trend-error term restricted to the new points given the old ones
    lastsim_Fhat_nKo = lastsim_L_oCn.t() * m_M;
    t0 = Bench::toc(nullptr, "Fhat_nKo     ", t0);
    lastsim_Ecirc_nKo = LinearAlgebra::rsolve(m_circ, F_n - lastsim_Fhat_nKo);
    t0 = Bench::toc(nullptr, "Ecirc_nKo     ", t0);
  }

  // Un-normalize simulations
  return y_n;
}
×
1393

UNCOV
1394
/** Update the last simulation (drawn with will_update=true) by conditioning on new data points.
 * @param y_u is n_u length column vector of new output values
 * @param X_u is n_u*d matrix of new input points
 * @return n_n*nsim matrix of updated simulations at the previously simulated points
 */
LIBKRIGING_EXPORT arma::mat NuggetKriging::update_simulate(const arma::vec& y_u, const arma::mat& X_u) {
  if (y_u.n_elem != X_u.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X_u.n_rows) + "x"
                             + std::to_string(X_u.n_cols) + "), y: (" + std::to_string(y_u.n_elem) + ")");

  if (X_u.n_cols != m_X.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(m_X.n_cols)
                             + "), new X: (...x" + std::to_string(X_u.n_cols) + ")");

  // Requires a previous simulate(..., will_update=true) call
  if (lastsim_y_n.is_empty() || lastsim_y_n.n_rows == 0)
    throw std::runtime_error("No previous simulation data available");

  // lastsim_Xn_n is stored column-wise (d x n_n), so n_rows is the input dimension
  if (lastsim_Xn_n.n_rows != X_u.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(X_u.n_cols)
                             + "), last sim X: (...x" + std::to_string(lastsim_Xn_n.n_rows) + ")");

  arma::uword n_n = lastsim_Xn_n.n_cols;  // number of previously simulated points
  arma::uword n_o = m_X.n_rows;           // number of original design points
  arma::uword d = m_X.n_cols;             // input dimension
  arma::mat Xn_o = trans(m_X);    // already normalized if needed
  arma::mat Xn_n = lastsim_Xn_n;  // already normalized

  arma::uword n_on = n_o + n_n;
  // arma::mat Xn_on = arma::join_cols(Xn_o, Xn_n);
  arma::mat F_on = arma::join_cols(m_F, lastsim_F_n);
  // NOTE(review): 'd', 'n_on' and 'F_on' appear unused below (lastsim_F_on is used
  // instead) — candidates for removal, TODO confirm.

  arma::uword n_u = X_u.n_rows;  // number of updating points
  // Normalize X_u
  arma::mat Xn_u = X_u;
  Xn_u.each_row() -= m_centerX;
  Xn_u.each_row() /= m_scaleX;

  // Define regression matrix (trend basis at the updating points)
  arma::mat F_u = Trend::regressionModelMatrix(m_regmodel, Xn_u);

  auto t0 = Bench::tic();
  Xn_u = trans(Xn_u);  // column-wise layout (d x n_u)
  t0 = Bench::toc(nullptr, "Xn_u.t()      ", t0);

  // NOTE(review): local variable with a member-style name (shadows any member of the
  // same name). alpha = sigma2 / (sigma2 + nugget) is the correlation fraction.
  double m_alpha = m_sigma2 / (m_sigma2 + m_nugget);

  // If the updating points are identical to the previous call, reuse all cached
  // 'lastsimup_*' quantities and skip every recomputation below.
  bool use_lastsimup = (!lastsimup_Xn_u.is_empty()) && (lastsimup_Xn_u - Xn_u).is_zero(arma::datum::eps);
  if (!use_lastsimup) {
    lastsimup_Xn_u = Xn_u;

    // Compute covariance between updated data (symmetric, lower triangle only)
    lastsimup_R_uu = arma::mat(n_u, n_u, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      // lastsimup_R_uu.at(i, i) = 1.0;
      for (arma::uword j = 0; j < i; j++) {
        lastsimup_R_uu.at(i, j) = lastsimup_R_uu.at(j, i) = _Cov((Xn_u.col(i) - Xn_u.col(j)), m_theta);
      }
    }
    lastsimup_R_uu.diag().ones();  // replaces R_uu.at(i, i) = 1.0;
    lastsimup_R_uu *= m_alpha;
    if (lastsim_with_nugget)
      lastsimup_R_uu.diag().ones();  // so diag will include nugget
    t0 = Bench::toc(nullptr, "R_uu          ", t0);

    // Compute covariance between updated/old data
    lastsimup_R_uo = arma::mat(n_u, n_o, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      for (arma::uword j = 0; j < n_o; j++) {
        lastsimup_R_uo.at(i, j) = _Cov((Xn_u.col(i) - Xn_o.col(j)), m_theta);
      }
    }
    lastsimup_R_uo *= m_alpha;
    t0 = Bench::toc(nullptr, "R_uo          ", t0);

    // Compute covariance between updated/new data
    lastsimup_R_un = arma::mat(n_u, n_n, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      for (arma::uword j = 0; j < n_n; j++) {
        arma::vec dij = Xn_u.col(i) - Xn_n.col(j);
        // Coinciding points share the nugget: use full normalized covariance 1
        if (lastsim_with_nugget && dij.is_zero(arma::datum::eps))
          lastsimup_R_un.at(i, j) = 1.0;
        else
          lastsimup_R_un.at(i, j) = _Cov(dij, m_theta) * m_alpha;
        // lastsimup_R_un.at(i, j) = _Cov(Xn_u.col(i) - Xn_n.col(j), m_theta) * m_alpha;
      }
    }
    t0 = Bench::toc(nullptr, "R_un          ", t0);
  }

  // ======================================================================
  // FOXY step #1 Extend the simulation to the design 'X_u' IF
  // NECESSARY. Remind that the simulation number j is
  // conditional on the given 'y_o' and on 'y_n = Y_sim[ , j]'
  //
  // CAUTION. To avoid unnecessary re-computations we here use
  // auxiliary variables that were computed in the creation of
  // the KM0 step AND later in the simulation. The first ones are
  // in 'theKM0$Extra' and the second ones are in 'Ex'
  //
  // In indices 'C' means comma and 'K' means pipe '|'
  //
  // ======================================================================

  if (!use_lastsimup) {
    // Joint covariance between (o,n) and u, whitened by the cached joint factor L_on
    arma::mat R_onCu = arma::join_rows(lastsimup_R_uo, lastsimup_R_un).t();
    arma::mat Rstar_onCu = LinearAlgebra::solve(lastsim_L_on, R_onCu);
    t0 = Bench::toc(nullptr, "Rstar_onCu          ", t0);

    // Trend-error term for u given (o,n)
    arma::mat Ecirc_uKon = LinearAlgebra::rsolve(lastsim_circ_on, F_u - Rstar_onCu.t() * lastsim_Fstar_on);
    t0 = Bench::toc(nullptr, "Ecirc_uKon          ", t0);

    // Conditional covariance of u given (o,n), including trend uncertainty
    arma::mat Sigma_uKon = lastsimup_R_uu - Rstar_onCu.t() * Rstar_onCu + Ecirc_uKon * Ecirc_uKon.t();
    t0 = Bench::toc(nullptr, "Sigma_uKon          ", t0);

    arma::mat LSigma_uKon = LinearAlgebra::safe_chol_lower(Sigma_uKon / m_alpha);
    t0 = Bench::toc(nullptr, "LSigma_uKon          ", t0);

    // Kriging weights of (y_o, y_n) for predicting at u
    arma::mat W_uCon = (R_onCu.t() + Ecirc_uKon * lastsim_Fcirc_on.t()) * lastsim_Rinv_on;
    t0 = Bench::toc(nullptr, "W_uCon          ", t0);

    // Conditional mean at u for each simulated trajectory
    arma::mat m_u = W_uCon.head_cols(n_o) * m_y;
    arma::mat M_u = arma::repmat(m_u, 1, lastsim_nsim) + W_uCon.tail_cols(n_n) * lastsim_y_n;

    // Same seed as the original simulation, so trajectories stay coherent
    Random::reset_seed(lastsim_seed);
    lastsimup_y_u = M_u + LSigma_uKon * Random::randn_mat(n_u, lastsim_nsim) * std::sqrt(m_sigma2);
    t0 = Bench::toc(nullptr, "y_u          ", t0);
  }

  // ======================================================================
  // FOXY step #2   Update the simulated paths on 'X_n'
  // ======================================================================

  if (!use_lastsimup) {
    arma::mat Rstar_ou = LinearAlgebra::solve(m_T, lastsimup_R_uo.t());
    t0 = Bench::toc(nullptr, "Rstar_ou          ", t0);

    // Trend-error term for u given o only
    arma::mat Fhat_uKo = Rstar_ou.t() * m_M;
    arma::mat Ecirc_uKo = LinearAlgebra::rsolve(m_circ, F_u - Fhat_uKo);
    t0 = Bench::toc(nullptr, "Ecirc_uKo          ", t0);

    // Conditional (given o) covariances among u and between n and u
    arma::mat Rtild_uCu = lastsimup_R_uu - Rstar_ou.t() * Rstar_ou + Ecirc_uKo * Ecirc_uKo.t();
    t0 = Bench::toc(nullptr, "Rtild_uCu          ", t0);

    arma::mat Rtild_nCu = lastsimup_R_un - Rstar_ou.t() * lastsim_L_oCn + Ecirc_uKo * lastsim_Ecirc_nKo.t();
    t0 = Bench::toc(nullptr, "Rtild_nCu          ", t0);

    // Update weights: how each simulated path at n reacts to the residual at u
    lastsimup_Wtild_nKu = LinearAlgebra::solve(Rtild_uCu, Rtild_nCu).t();
    t0 = Bench::toc(nullptr, "Wtild_nKu          ", t0);
  }

  // Correct each trajectory by the gap between observed and simulated values at u
  return lastsim_y_n + lastsimup_Wtild_nKu * (arma::repmat(y_u, 1, lastsim_nsim) - lastsimup_y_u);
}
×
1541

1542
/** Add new conditional data points to previous (X,y), then perform new fit.
1543
 * @param y_u is n_u length column vector of new output
1544
 * @param X_u is n_u*d matrix of new input
1545
 * @param refit is true if we want to re-fit the model
1546
 */
UNCOV
1547
LIBKRIGING_EXPORT void NuggetKriging::update(const arma::vec& y_u, const arma::mat& X_u, const bool refit) {
  // Validate the new data dimensions before touching any state
  if (y_u.n_elem != X_u.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X_u.n_rows) + "x"
                             + std::to_string(X_u.n_cols) + "), y: (" + std::to_string(y_u.n_elem) + ")");

  if (X_u.n_cols != m_X.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(m_X.n_cols)
                             + "), new X: (...x" + std::to_string(X_u.n_cols) + ")");

  // Rebuild starting parameters from the current model state. Hyper-parameters are
  // de-normalized (scaleX / scaleY) because fit() below receives de-normalized data.
  // - refit == true : hyper-parameters are re-estimated (per the m_est_* flags),
  //   starting from their current values;
  // - refit == false: everything is kept fixed and fit() only rebuilds the
  //   auxiliary matrices (optimizer "none").
  Parameters parameters;
  if (refit && !m_est_beta) {
    // beta was user-provided: keep it fixed while re-estimating the others
    parameters = Parameters{
        std::make_optional(arma::vec(1, arma::fill::value(this->m_nugget * this->m_scaleY * this->m_scaleY))),
        this->m_est_nugget,
        std::make_optional(arma::vec(1, arma::fill::value(this->m_sigma2 * this->m_scaleY * this->m_scaleY))),
        this->m_est_sigma2,
        std::make_optional(trans(this->m_theta) % this->m_scaleX),
        this->m_est_theta,
        std::make_optional(trans(this->m_beta) * this->m_scaleY),
        false};
  } else {
    // Either beta is re-estimated (refit && m_est_beta), or nothing is estimated
    // (!refit): in both cases an empty beta placeholder is passed.
    parameters = Parameters{
        std::make_optional(arma::vec(1, arma::fill::value(this->m_nugget * this->m_scaleY * this->m_scaleY))),
        refit && this->m_est_nugget,
        std::make_optional(arma::vec(1, arma::fill::value(this->m_sigma2 * this->m_scaleY * this->m_scaleY))),
        refit && this->m_est_sigma2,
        std::make_optional(trans(this->m_theta) % this->m_scaleX),
        refit && this->m_est_theta,
        std::make_optional(arma::ones<arma::vec>(0)),
        refit && this->m_est_beta};
  }

  // De-normalize previous data so it can be joined with the (unnormalized) new data,
  // then perform a single fit; with "none" the optimizer only rebuilds internals.
  this->fit(arma::join_cols(m_y * this->m_scaleY + this->m_centerY,
                            y_u),  // de-normalize previous data according to suite unnormed new data
            arma::join_cols((m_X.each_row() % this->m_scaleX).each_row() + this->m_centerX, X_u),
            m_regmodel,
            m_normalize,
            refit ? m_optim : std::string("none"),
            m_objective,
            parameters);
}
×
1606

UNCOV
1607
LIBKRIGING_EXPORT std::string NuggetKriging::summary() const {
  // Human-readable description of the model: data ranges, trend, variance,
  // covariance kernel/range/nugget and fit settings.
  std::ostringstream out;

  // Print a vector as "v0, v1, ..." (no trailing separator).
  auto print_vec = [&out](const arma::vec& values) {
    for (arma::uword k = 0; k < values.n_elem; k++) {
      if (k > 0)
        out << ", ";
      out << values[k];
    }
  };

  if (m_X.is_empty() || m_X.n_rows == 0) {
    // Not fitted yet: only the kernel is known
    out << "* covariance:\n";
    out << "  * kernel: " << m_covType << "\n";
    return out.str();
  }

  // Input ranges per dimension, then output range
  const arma::rowvec mins = arma::min(m_X, 0);
  const arma::rowvec maxs = arma::max(m_X, 0);
  out << "* data" << (m_normalize ? " (normalized): " : ": ") << m_X.n_rows << "x";
  for (arma::uword i = 0; i < m_X.n_cols; i++) {
    out << "[" << mins[i] << "," << maxs[i] << "]";
    if (i + 1 < m_X.n_cols)
      out << ",";
  }
  out << " -> " << m_y.n_elem << "x[" << arma::min(m_y) << "," << arma::max(m_y) << "]\n";

  // "(est.)" marks values that were estimated rather than user-provided
  out << "* trend " << Trend::toString(m_regmodel) << (m_est_beta ? " (est.): " : ": ");
  print_vec(m_beta);
  out << "\n";
  out << "* variance" << (m_est_sigma2 ? " (est.): " : ": ") << m_sigma2 << "\n";
  out << "* covariance:\n";
  out << "  * kernel: " << m_covType << "\n";
  out << "  * range" << (m_est_theta ? " (est.): " : ": ");
  print_vec(m_theta);
  out << "\n";
  out << "  * nugget" << (m_est_nugget ? " (est.): " : ": ") << m_nugget << "\n";
  out << "  * fit:\n";
  out << "    * objective: " << m_objective << "\n";
  out << "    * optim: " << m_optim << "\n";
  return out.str();
}
×
1655

UNCOV
1656
void NuggetKriging::save(const std::string filename) const {
×
1657
  nlohmann::json j;
×
1658

UNCOV
1659
  j["version"] = 2;
×
1660
  j["content"] = "NuggetKriging";
×
1661

1662
  // _Cov_pow & std::function embedded by make_Cov
UNCOV
1663
  j["covType"] = m_covType;
×
1664
  j["X"] = to_json(m_X);
×
1665
  j["centerX"] = to_json(m_centerX);
×
1666
  j["scaleX"] = to_json(m_scaleX);
×
1667
  j["y"] = to_json(m_y);
×
1668
  j["centerY"] = m_centerY;
×
1669
  j["scaleY"] = m_scaleY;
×
1670
  j["normalize"] = m_normalize;
×
1671
  j["regmodel"] = Trend::toString(m_regmodel);
×
1672
  j["optim"] = m_optim;
×
1673
  j["objective"] = m_objective;
×
1674
  // Auxiliary data
UNCOV
1675
  j["dX"] = to_json(m_dX);
×
1676
  j["maxdX"] = to_json(m_maxdX);
×
1677
  j["F"] = to_json(m_F);
×
1678
  j["T"] = to_json(m_T);
×
1679
  j["R"] = to_json(m_R);
×
1680
  j["M"] = to_json(m_M);
×
1681
  j["star"] = to_json(m_star);
×
1682
  j["circ"] = to_json(m_circ);
×
1683
  j["z"] = to_json(m_z);
×
1684
  j["beta"] = to_json(m_beta);
×
1685
  j["est_beta"] = m_est_beta;
×
1686
  j["theta"] = to_json(m_theta);
×
1687
  j["est_theta"] = m_est_theta;
×
1688
  j["sigma2"] = m_sigma2;
×
1689
  j["est_sigma2"] = m_est_sigma2;
×
1690
  j["nugget"] = m_nugget;
×
1691
  j["est_nugget"] = m_est_nugget;
×
1692

UNCOV
1693
  std::ofstream f(filename);
×
1694
  f << std::setw(4) << j;
×
1695
}
×
1696

UNCOV
1697
/** Deserialize a model previously written by NuggetKriging::save().
 * @param filename path of the JSON file to read
 * @return a fully usable NuggetKriging (no re-fit needed: auxiliary matrices are restored)
 * @throws std::runtime_error if the file has a wrong schema version or content type
 */
NuggetKriging NuggetKriging::load(const std::string filename) {
  std::ifstream f(filename);
  nlohmann::json j = nlohmann::json::parse(f);

  // Refuse files written with a different schema version
  uint32_t version = j["version"].template get<uint32_t>();
  if (version != 2) {
    throw std::runtime_error(asString("Bad version to load from '", filename, "'; found ", version, ", requires 2"));
  }
  std::string content = j["content"].template get<std::string>();
  if (content != "NuggetKriging") {
    throw std::runtime_error(
        asString("Bad content to load from '", filename, "'; found '", content, "', requires 'NuggetKriging'"));
  }

  std::string covType = j["covType"].template get<std::string>();
  NuggetKriging kr(covType);  // _Cov_pow & std::function embedded by make_Cov

  // Data and normalization constants
  kr.m_X = mat_from_json(j["X"]);
  kr.m_centerX = rowvec_from_json(j["centerX"]);
  kr.m_scaleX = rowvec_from_json(j["scaleX"]);
  kr.m_y = colvec_from_json(j["y"]);
  kr.m_centerY = j["centerY"].template get<decltype(kr.m_centerY)>();
  kr.m_scaleY = j["scaleY"].template get<decltype(kr.m_scaleY)>();
  kr.m_normalize = j["normalize"].template get<decltype(kr.m_normalize)>();

  std::string model = j["regmodel"].template get<std::string>();
  kr.m_regmodel = Trend::fromString(model);

  kr.m_optim = j["optim"].template get<decltype(kr.m_optim)>();
  kr.m_objective = j["objective"].template get<decltype(kr.m_objective)>();
  // Auxiliary data (pre-computed matrices saved by save(), restored as-is)
  kr.m_dX = mat_from_json(j["dX"]);
  kr.m_maxdX = colvec_from_json(j["maxdX"]);
  kr.m_F = mat_from_json(j["F"]);
  kr.m_T = mat_from_json(j["T"]);
  kr.m_R = mat_from_json(j["R"]);
  kr.m_M = mat_from_json(j["M"]);
  kr.m_star = mat_from_json(j["star"]);
  kr.m_circ = mat_from_json(j["circ"]);
  kr.m_z = colvec_from_json(j["z"]);
  kr.m_beta = colvec_from_json(j["beta"]);
  kr.m_est_beta = j["est_beta"].template get<decltype(kr.m_est_beta)>();
  kr.m_theta = colvec_from_json(j["theta"]);
  kr.m_est_theta = j["est_theta"].template get<decltype(kr.m_est_theta)>();
  kr.m_sigma2 = j["sigma2"].template get<decltype(kr.m_sigma2)>();
  kr.m_est_sigma2 = j["est_sigma2"].template get<decltype(kr.m_est_sigma2)>();
  kr.m_nugget = j["nugget"].template get<decltype(kr.m_nugget)>();
  kr.m_est_nugget = j["est_nugget"].template get<decltype(kr.m_est_nugget)>();
  kr.m_is_empty = false;  // model is now fully populated

  return kr;
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc