• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

libKriging / libKriging / 16194641062

10 Jul 2025 12:00PM UTC coverage: 37.371%. Remained the same
16194641062

push

github

yannrichet
sync version in R

3295 of 8817 relevant lines covered (37.37%)

59122.67 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

49.37
/src/lib/Kriging.cpp
1
// clang-format off
2
// MUST BE at the beginning before any other <cmath> include (e.g. in armadillo's headers)
3
#define _USE_MATH_DEFINES // required for Visual Studio
4

5
#include <cmath>
6
// clang-format on
7

8
#include "libKriging/utils/lk_armadillo.hpp"
9

10
#include "libKriging/Bench.hpp"
11
#include "libKriging/CacheFunction.hpp"
12
#include "libKriging/Covariance.hpp"
13
#include "libKriging/Kriging.hpp"
14
#include "libKriging/KrigingException.hpp"
15
#include "libKriging/LinearAlgebra.hpp"
16
#include "libKriging/Optim.hpp"
17
#include "libKriging/Random.hpp"
18
#include "libKriging/Trend.hpp"
19
#include "libKriging/utils/custom_hash_function.hpp"
20
#include "libKriging/utils/data_from_arma_vec.hpp"
21
#include "libKriging/utils/jsonutils.hpp"
22
#include "libKriging/utils/nlohmann/json.hpp"
23
#include "libKriging/utils/utils.hpp"
24

25
#include <cassert>
26
#include <lbfgsb_cpp/lbfgsb.hpp>
27
#include <map>
28
#include <tuple>
29
#include <vector>
30

31
/************************************************/
32
/**      Kriging implementation        **/
33
/************************************************/
34

35
// This will create the dist(xi,xj) function above. Need to parse "covType".
36
void Kriging::make_Cov(const std::string& covType) {
117✔
37
  m_covType = covType;
117✔
38
  if (covType.compare("gauss") == 0) {
117✔
39
    _Cov = Covariance::Cov_gauss;
117✔
40
    _DlnCovDtheta = Covariance::DlnCovDtheta_gauss;
117✔
41
    _DlnCovDx = Covariance::DlnCovDx_gauss;
117✔
42
    _Cov_pow = 2;
117✔
43
  } else if (covType.compare("exp") == 0) {
×
44
    _Cov = Covariance::Cov_exp;
×
45
    _DlnCovDtheta = Covariance::DlnCovDtheta_exp;
×
46
    _DlnCovDx = Covariance::DlnCovDx_exp;
×
47
    _Cov_pow = 1;
×
48
  } else if (covType.compare("matern3_2") == 0) {
×
49
    _Cov = Covariance::Cov_matern32;
×
50
    _DlnCovDtheta = Covariance::DlnCovDtheta_matern32;
×
51
    _DlnCovDx = Covariance::DlnCovDx_matern32;
×
52
    _Cov_pow = 1.5;
×
53
  } else if (covType.compare("matern5_2") == 0) {
×
54
    _Cov = Covariance::Cov_matern52;
×
55
    _DlnCovDtheta = Covariance::DlnCovDtheta_matern52;
×
56
    _DlnCovDx = Covariance::DlnCovDx_matern52;
×
57
    _Cov_pow = 2.5;
×
58
  } else
59
    throw std::invalid_argument("Unsupported covariance kernel: " + covType);
×
60

61
  // arma::cout << "make_Cov done." << arma::endl;
62
}
117✔
63

64
// Covariance matrix between two sets of points X1 (rows) and X2 (rows),
// evaluated with the fitted kernel, length-scales m_theta and variance
// m_sigma2. Inputs are first mapped into the normalized space used at fit
// time (centered by m_centerX, scaled by m_scaleX).
LIBKRIGING_EXPORT arma::mat Kriging::covMat(const arma::mat& X1, const arma::mat& X2) {
  // Normalize a copy of the input the same way the training data was.
  const auto normalize = [this](const arma::mat& X) {
    arma::mat Xn = X;
    Xn.each_row() -= m_centerX;
    Xn.each_row() /= m_scaleX;
    return Xn;
  };
  const arma::mat Xn1 = normalize(X1);
  const arma::mat Xn2 = normalize(X2);

  arma::mat R(Xn1.n_rows, Xn2.n_rows, arma::fill::none);
  // Cells are independent; fill column by column.
  for (arma::uword j = 0; j < Xn2.n_rows; j++) {
    for (arma::uword i = 0; i < Xn1.n_rows; i++) {
      R.at(i, j) = _Cov((Xn1.row(i) - Xn2.row(j)).t(), m_theta);
    }
  }
  // Correlations scaled by the process variance.
  return R * m_sigma2;
}
80

81
// Minimal constructor: only selects the covariance kernel (see make_Cov);
// fit() must be called afterwards to actually train the model.
LIBKRIGING_EXPORT Kriging::Kriging(const std::string& covType) {
  make_Cov(covType);
}
85

86
// Full constructor: checks input dimensions, selects the covariance kernel,
// then immediately fits the model on (y, X) with the given trend model,
// optimizer, objective and starting parameters.
LIBKRIGING_EXPORT Kriging::Kriging(const arma::vec& y,
                                   const arma::mat& X,
                                   const std::string& covType,
                                   const Trend::RegressionModel& regmodel,
                                   bool normalize,
                                   const std::string& optim,
                                   const std::string& objective,
                                   const Parameters& parameters) {
  // One response value is required per design row.
  if (y.n_elem != X.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X.n_rows) + "x"
                             + std::to_string(X.n_cols) + "), y: (" + std::to_string(y.n_elem) + ")");

  make_Cov(covType);
  fit(y, X, regmodel, normalize, optim, objective, parameters);
}
101

102
// Explicit deep copy: ExplicitCopySpecifier is a tag type that makes copying
// intentional at the call site; delegates to the regular copy constructor.
LIBKRIGING_EXPORT Kriging::Kriging(const Kriging& other, ExplicitCopySpecifier) : Kriging{other} {}

// Shared empty vector passed as a placeholder argument to cholCov /
// update_cholCov below (exact semantics defined in LinearAlgebra).
arma::vec Kriging::ones = arma::ones<arma::vec>(0);
105

106
// Build the internal model state for a given length-scale vector `theta`:
// correlation matrix R, its Cholesky factor L, the whitened regressors/response
// (Fstar, ystar), their economy QR decomposition (Qstar, Rstar), the residual
// vector Estar with its sum of squares SSEstar, and (optionally) the GLS trend
// coefficients betahat. Optional `bench` map collects per-step timings.
Kriging::KModel Kriging::make_Model(const arma::vec& theta, std::map<std::string, double>* bench) const {
  arma::mat R;
  arma::mat L;
  arma::mat Linv;
  arma::mat Fstar;
  arma::vec ystar;
  arma::mat Rstar;
  arma::mat Qstar;
  arma::vec Estar;
  double SSEstar{};
  arma::vec betahat;
  Kriging::KModel m{R, L, Linv, Fstar, ystar, Rstar, Qstar, Estar, SSEstar, betahat};

  arma::uword n = m_X.n_rows;
  arma::uword d = m_X.n_cols;  // NOTE(review): unused here; kept for symmetry with sibling methods
  arma::uword p = m_F.n_cols;

  auto t0 = Bench::tic();
  m.R = arma::mat(n, n, arma::fill::none);
  // check if we want to recompute model for same theta, for augmented Xy (using cholesky fast update).
  bool update = false;
  if (!m_is_empty)
    update = (m_theta.size() == theta.size()) && (theta - m_theta).is_zero() && (this->m_T.memptr() != nullptr)
             && (n > this->m_T.n_rows);
  if (update) {
    // Same theta, more data: extend the previous factor m_T instead of refactorizing.
    m.L = LinearAlgebra::update_cholCov(&(m.R), m_dX, theta, _Cov, 1, Kriging::ones, m_T, m_R);
  } else
    m.L = LinearAlgebra::cholCov(&(m.R), m_dX, theta, _Cov, 1, Kriging::ones);
  t0 = Bench::toc(bench, "R = _Cov(dX)  &L = Chol(R)", t0);

  // Compute intermediate useful matrices
  // Whiten [F, y] in one triangular solve, then split the result.
  arma::mat Fystar = LinearAlgebra::solve(m.L, arma::join_rows(m_F, m_y));
  t0 = Bench::toc(bench, "Fy* = L \\ [F,y]", t0);
  m.Fstar = Fystar.head_cols(p);
  m.ystar = Fystar.tail_cols(1);

  arma::mat Q_qr;
  arma::mat R_qr;
  arma::qr_econ(Q_qr, R_qr, Fystar);
  t0 = Bench::toc(bench, "Q_qr,R_qr = QR(Fy*)", t0);

  // Residual of ystar on span(Fstar) is carried by the last QR column; the
  // (p,p) entry of R_qr gives its norm, so SSEstar is its square.
  m.Rstar = R_qr.head_cols(p);
  m.Qstar = Q_qr.head_cols(p);
  m.Estar = Q_qr.tail_cols(1) * R_qr.at(p, p);
  m.SSEstar = R_qr.at(p, p) * R_qr.at(p, p);

  if (m_est_beta) {
    // GLS estimate of the trend coefficients from the triangular system.
    m.betahat = LinearAlgebra::solve(m.Rstar, R_qr.tail_cols(1));
    t0 = Bench::toc(bench, "^b = R* \\ R_qr[1:p, p+1]", t0);
  } else {
    m.betahat = arma::vec(p, arma::fill::zeros);  // whatever: not used
  }

  return m;
}
161

162
// Objective function for fit : -logLikelihood
163

164
// Profiled log-likelihood at length-scales `_theta`.
// Optionally fills `grad_out` (d-vector), `hess_out` (dxd, only when grad is
// also requested), the intermediate `model`, and `bench` timings.
// When m_est_sigma2 is true, sigma2 is concentrated out (SSE*/n); otherwise
// the fixed m_sigma2 is used and the residual term enters explicitly.
double Kriging::_logLikelihood(const arma::vec& _theta,
                               arma::vec* grad_out,
                               arma::mat* hess_out,
                               Kriging::KModel* model,
                               std::map<std::string, double>* bench) const {
  // arma::cout << " theta: " << _theta << arma::endl;

  Kriging::KModel m = make_Model(_theta, bench);
  if (model != nullptr)
    *model = m;

  arma::uword n = m_X.n_rows;

  double sigma2;
  double ll;
  if (m_est_sigma2) {  // DiceKriging: model@case == "LLconcentration_beta_sigma2"
    sigma2 = m.SSEstar / n;
    // log|R| = 2*sum(log(diag(L))) since R = L*L'.
    ll = -0.5 * (n * log(2 * M_PI * sigma2) + 2 * arma::sum(log(m.L.diag())) + n);
  } else {  // DiceKriging: model@case == "LLconcentration_beta"
    sigma2 = m_sigma2;
    ll = -0.5
         * (n * log(2 * M_PI * sigma2) + 2 * arma::sum(log(m.L.diag()))
            + as_scalar(LinearAlgebra::crossprod(m.Estar)) / sigma2);
  }

  if (grad_out != nullptr) {
    arma::uword d = m_X.n_cols;
    arma::uword p = m_F.n_cols;

    auto t0 = Bench::tic();
    arma::vec terme1 = arma::vec(d);  // useful if (hess_out != nullptr)

    // Lazily compute L^-1 once; reused by both gradient and hessian.
    if ((m.Linv.memptr() == nullptr) || (arma::size(m.Linv) != arma::size(m.L))) {
      m.Linv = LinearAlgebra::solve(m.L, arma::mat(n, n, arma::fill::eye));
      t0 = Bench::toc(bench, "L ^-1", t0);
    }

    arma::mat Rinv = LinearAlgebra::crossprod(m.Linv);
    t0 = Bench::toc(bench, "R^-1 = t(L^-1) * L^-1", t0);

    arma::mat x = LinearAlgebra::solve(m.L.t(), m.Estar);
    t0 = Bench::toc(bench, "x = tL \\ z", t0);

    // dR/dtheta_k, built from R and the kernel's log-derivative; symmetric
    // with zero diagonal, so only the lower triangle is computed.
    arma::cube gradR = arma::cube(n, n, d, arma::fill::none);
    const arma::vec zeros = arma::vec(d, arma::fill::zeros);
    for (arma::uword i = 0; i < n; i++) {
      gradR.tube(i, i) = zeros;
      for (arma::uword j = 0; j < i; j++) {
        gradR.tube(i, j) = m.R.at(i, j) * _DlnCovDtheta(m_dX.col(i * n + j), _theta);
        gradR.tube(j, i) = gradR.tube(i, j);
      }
    }
    t0 = Bench::toc(bench, "gradR = R * dlnCov(dX)", t0);

    for (arma::uword k = 0; k < d; k++) {
      t0 = Bench::tic();
      arma::mat gradR_k = gradR.slice(k);
      t0 = Bench::toc(bench, "gradR_k = gradR[k]", t0);

      // should make a fast function trace_prod(A,B) -> sum_i(sum_j(Ai,j*Bj,i))
      terme1.at(k) = as_scalar(x.t() * gradR_k * x) / sigma2;
      double terme2 = -arma::trace(Rinv * gradR_k);  //-arma::accu(Rinv % gradR_k_upper)
      (*grad_out).at(k) = (terme1.at(k) + terme2) / 2;
      t0 = Bench::toc(bench, "grad_ll[k] = xt * gradR_k / S2 + tr(Ri * gradR_k)", t0);

      if (hess_out != nullptr) {
        //' @ref O. Roustant
        // for (k in 1:d) {
        //   for (l in 1:k) {
        //     aux <- grad_R[[k]] %*% Rinv %*% grad_R[[l]]
        //     Dkl <- d2_matcor(X, modele_proba$covariance, R, grad_logR, k,l)
        //     xk <- backsolve(t(T),grad_R[[k]]%*%x, upper.tri=FALSE)
        //     xl <- backsolve(t(T),grad_R[[l]]%*%x, upper.tri=FALSE)
        //
        //     hess_A <- - (t(xk) %*% H %*% xl) / sigma2_hat
        //     hess_B <- (t(x) %*% ( -Dkl+2*aux ) %*% x) / sigma2_hat
        //     hess_C <- - grad_A[k] * grad_A[l] / n
        //     hess_D <- - sum(diag( Rinv %*% aux ))
        //     hess_E <- sum(diag( Rinv %*% Dkl ))
        //
        //     hess_log_vrais[k,l] <- 2*hess_A + hess_B + hess_C + hess_D + hess_E
        //     hess_log_vrais[l,k] <- hess_log_vrais[k,l]
        //   }
        // }
        t0 = Bench::tic();
        arma::mat H = LinearAlgebra::tcrossprod(m.Qstar);
        t0 = Bench::toc(bench, "H =  Q* * t(Q*)", t0);

        // Symmetric hessian: only the lower triangle l <= k is computed.
        for (arma::uword l = 0; l <= k; l++) {
          arma::mat gradR_l = gradR.slice(l);  // arma::mat(n, n);

          t0 = Bench::tic();
          arma::mat aux = gradR_k * Rinv * gradR_l;
          t0 = Bench::toc(bench, "aux =  gradR[k] * Ri * gradR[l]", t0);

          // Second derivative of R w.r.t. (theta_k, theta_l).
          arma::mat hessR_k_l = arma::mat(n, n, arma::fill::none);
          if (k == l) {
            for (arma::uword i = 0; i < n; i++) {
              hessR_k_l.at(i, i) = 0;
              for (arma::uword j = 0; j < i; j++) {
                double dln_k = gradR_k.at(i, j);
                hessR_k_l.at(i, j) = hessR_k_l.at(j, i)
                    = dln_k * (dln_k / m.R.at(i, j) - (_Cov_pow + 1) / _theta.at(k));
                // !! NO: it just work for exp type kernels. Matern MUST have a special treatment !!!
              }
            }
          } else {
            for (arma::uword i = 0; i < n; i++) {
              hessR_k_l.at(i, i) = 0;
              for (arma::uword j = 0; j < i; j++) {
                hessR_k_l.at(i, j) = hessR_k_l.at(j, i) = gradR_k.at(i, j) * gradR_l.at(i, j) / m.R.at(i, j);
                //= gradR.slice(i).col(j)[k] * gradR.slice(i).col(j)[l] / m.R.at(i, j);
              }
            }
          }
          t0 = Bench::toc(bench, "hessR_k_l = ...", t0);

          // arma::mat xk =LinearAlgebra::solve(m.T, gradsR[k] * x);
          arma::mat xk = m.Linv * gradR_k * x;
          t0 = Bench::toc(bench, "xk = L \\ gradR[k] * x", t0);
          arma::mat xl;
          if (k == l)
            xl = xk;  // reuse: identical product on the diagonal
          else
            xl = m.Linv * gradR_l * x;
          t0 = Bench::toc(bench, "xl = L \\ gradR[l] * x", t0);

          // arma::cout << " hess_A:" << -xk.t() * H * xl / sigma2 << arma::endl;
          // arma::cout << " hess_B:" << -x.t() * (hessR_k_l - 2*aux) * x / sigma2 << arma::endl;
          // arma::cout << " hess_C:" << -terme1.at(k) * terme1.at(l) / n << arma::endl;
          // arma::cout << " hess_D:" << -arma::trace(Rinv * aux)  << arma::endl;
          // arma::cout << " hess_E:" << arma::trace(Rinv * hessR_k_l) << arma::endl;

          double h_lk
              = (2.0 * xk.t() * H * xl / (sigma2) + x.t() * (hessR_k_l - 2 * aux) * x / (sigma2)
                 + arma::trace(Rinv * aux) + -arma::trace(Rinv * hessR_k_l))[0];  // should optim there using accu  &%
          if (m_est_sigma2)
            h_lk += terme1.at(k) * terme1.at(l) / n;

          (*hess_out).at(l, k) = (*hess_out).at(k, l) = h_lk / 2;
          t0 = Bench::toc(bench, "hess_ll[l,k] = ...", t0);
        }
      }
    }
  }
  return ll;
}
311

312
// Public wrapper around _logLikelihood: returns (ll, grad, hess) at `_theta`.
// grad / hess stay empty unless requested (_hess implies computing grad too,
// matching the original behavior). When _bench is true, per-step timings are
// collected and printed aligned on arma::cout.
LIBKRIGING_EXPORT std::tuple<double, arma::vec, arma::mat> Kriging::logLikelihoodFun(const arma::vec& _theta,
                                                                                     const bool _grad,
                                                                                     const bool _hess,
                                                                                     const bool _bench) {
  double ll = -1;
  arma::vec grad;
  arma::mat hess;

  // The previous version duplicated the whole _grad/_hess dispatch in both the
  // bench and no-bench branches; a single path with an optional bench map is
  // equivalent and removes the copy-paste.
  std::map<std::string, double> bench;
  std::map<std::string, double>* bench_ptr = _bench ? &bench : nullptr;

  if (_grad || _hess) {
    grad = arma::vec(_theta.n_elem);
    if (!_hess) {
      ll = _logLikelihood(_theta, &grad, nullptr, nullptr, bench_ptr);
    } else {
      hess = arma::mat(_theta.n_elem, _theta.n_elem, arma::fill::none);
      ll = _logLikelihood(_theta, &grad, &hess, nullptr, bench_ptr);
    }
  } else
    ll = _logLikelihood(_theta, nullptr, nullptr, nullptr, bench_ptr);

  if (_bench) {
    // Align the timing table on the longest label.
    size_t num = 0;
    for (auto& kv : bench)
      num = std::max(kv.first.size(), num);
    for (auto& kv : bench)
      arma::cout << "| " << Bench::pad(kv.first, num, ' ') << " | " << kv.second << " |" << arma::endl;
  }

  return std::make_tuple(ll, std::move(grad), std::move(hess));
}
354

355
// Objective function for fit : -LOO
356

357
// Leave-one-out criterion at length-scales `_theta`: mean of squared LOO
// prediction errors. Optionally fills `grad_out` (d-vector), `yhat_out`
// (n x 2: LOO predictions and normalized stdevs), the intermediate `model`,
// and `bench` timings.
double Kriging::_leaveOneOut(const arma::vec& _theta,
                             arma::vec* grad_out,
                             arma::mat* yhat_out,
                             Kriging::KModel* model,
                             std::map<std::string, double>* bench) const {
  // arma::cout << " theta: " << _theta << arma::endl;
  //' @ref https://github.com/DiceKrigingClub/DiceKriging/blob/master/R/leaveOneOutFun.R
  // model@covariance <- vect2covparam(model@covariance, param)
  // model@covariance@sd2 <- 1                # to get the correlation matrix
  //
  // R <- covMatrix(model@covariance, model@X)[[1]]
  // T <- chol(R)
  //
  // M <- backsolve(t(T), model@F, upper.tri = FALSE)
  //
  // Rinv <- chol2inv(T)             # cost : n*n*n/3
  //...
  //  Rinv.F <- Rinv %*% (model@F)    # cost : 2*n*n*p
  //  T.M <- chol(crossprod(M))       # cost : p*p*p/3, neglected
  //  aux <- backsolve(t(T.M), t(Rinv.F), upper.tri=FALSE)   # cost : p*p*n, neglected
  //  Q <- Rinv - crossprod(aux)      # cost : 2*n*n*(p-1/2)
  //  Q.y <- Q %*% (model@y)          # cost : 2*n*n
  //  ## Remark:   Q <- Cinv - Cinv.F %*% solve(t(M)%*%M) %*% t(Cinv.F)
  //...
  // sigma2LOO <- 1/diag(Q)
  // errorsLOO <- sigma2LOO * (Q.y)       # cost : n, neglected
  //
  // LOOfun <- as.numeric(crossprod(errorsLOO)/model@n)

  // arma::cout << " theta: " << _theta << arma::endl;
  Kriging::KModel m = make_Model(_theta, bench);
  if (model != nullptr)
    *model = m;

  arma::uword n = m_X.n_rows;

  auto t0 = Bench::tic();
  // Lazily compute L^-1 once (shared with the gradient path below).
  if ((m.Linv.memptr() == nullptr) || (arma::size(m.Linv) != arma::size(m.L))) {
    m.Linv = LinearAlgebra::solve(m.L, arma::mat(n, n, arma::fill::eye));
    t0 = Bench::toc(bench, "L ^-1", t0);
  }
  arma::mat By = m.Linv.t() * m.Estar;
  t0 = Bench::toc(bench, "By = L^-1 * E*", t0);
  arma::mat A = m.Qstar.t() * m.Linv;
  t0 = Bench::toc(bench, "A = Q* * L^-1", t0);
  // B plays the role of Q in the DiceKriging reference above.
  arma::mat B = LinearAlgebra::crossprod(m.Linv) - LinearAlgebra::crossprod(A);
  t0 = Bench::toc(bench, "B = t(L^-1) * L^-1 - t(A) * A", t0);

  arma::vec sigma2LOO = 1 / B.diag();
  t0 = Bench::toc(bench, "S2l = 1 / diag(Q)", t0);

  arma::vec errorsLOO = sigma2LOO % By;
  t0 = Bench::toc(bench, "E = S2l * Qy", t0);

  double loo = arma::accu(errorsLOO % errorsLOO) / n;
  t0 = Bench::toc(bench, "loo = Acc(E * E) / n", t0);

  if (yhat_out != nullptr) {
    // Column 0: LOO predictions; column 1: normalized LOO stdevs.
    (*yhat_out).col(0) = m_y - errorsLOO;
    (*yhat_out).col(1) = arma::sqrt(sigma2LOO);
  }

  if (grad_out != nullptr) {
    //' @ref https://github.com/cran/DiceKriging/blob/master/R/leaveOneOutGrad.R
    // leaveOneOutDer <- matrix(0, nparam, 1)
    // for (k in 1:nparam) {
    //        gradR.k <- covMatrixDerivative(model@covariance, X=model@X, C0=R, k=k)
    //        diagdQ <- - diagABA(A=Q, B=gradR.k)
    //        dsigma2LOO <- - (sigma2LOO^2) * diagdQ
    //        derrorsLOO <- dsigma2LOO * Q.y - sigma2LOO * (Q%*%(gradR.k%*%Q.y))
    //        leaveOneOutDer[k] <- 2*crossprod(errorsLOO, derrorsLOO)/model@n
    //}

    arma::uword d = m_X.n_cols;

    t0 = Bench::tic();
    // dR/dtheta_k; symmetric with zero diagonal, only lower triangle computed.
    arma::cube gradR = arma::cube(n, n, d, arma::fill::none);
    const arma::vec zeros = arma::vec(d, arma::fill::zeros);
    for (arma::uword i = 0; i < n; i++) {
      gradR.tube(i, i) = zeros;
      for (arma::uword j = 0; j < i; j++) {
        gradR.tube(i, j) = m.R.at(i, j) * _DlnCovDtheta(m_dX.col(i * n + j), _theta);
        gradR.tube(j, i) = gradR.tube(i, j);
      }
    }
    t0 = Bench::toc(bench, "gradR = R * dlnCov(dX)", t0);

    for (arma::uword k = 0; k < m_X.n_cols; k++) {
      t0 = Bench::tic();
      arma::mat gradR_k = gradR.slice(k);
      t0 = Bench::toc(bench, "gradR_k = gradR[k]", t0);

      arma::vec diagdB = -LinearAlgebra::diagABA(B, gradR_k);
      t0 = Bench::toc(bench, "diagdQ = DiagABA(B, gradR_k)", t0);

      arma::vec dsigma2LOO = -sigma2LOO % sigma2LOO % diagdB;
      t0 = Bench::toc(bench, "dS2l = -S2l % S2l % diagdQ", t0);

      arma::vec derrorsLOO = dsigma2LOO % By - sigma2LOO % (B * (gradR_k * By));
      t0 = Bench::toc(bench, "dE = dS2l * By- S2l * (B * gradR_k * By)", t0);

      (*grad_out)(k) = 2 * dot(errorsLOO, derrorsLOO) / n;
      t0 = Bench::toc(bench, "grad_loo[k] = E * dE / n", t0);
    }
  }
  return loo;
}
464

465
// Public wrapper around _leaveOneOut: returns (loo, grad) at `_theta`; grad is
// left empty unless _grad is true. When _bench is true, per-step timings are
// collected and printed aligned on arma::cout.
LIBKRIGING_EXPORT std::tuple<double, arma::vec> Kriging::leaveOneOutFun(const arma::vec& _theta,
                                                                        const bool _grad,
                                                                        const bool _bench) {
  double loo = -1;
  arma::vec grad;

  // The previous version duplicated the _grad dispatch in both the bench and
  // no-bench branches; a single path with an optional bench map is equivalent.
  std::map<std::string, double> bench;
  std::map<std::string, double>* bench_ptr = _bench ? &bench : nullptr;

  if (_grad) {
    grad = arma::vec(_theta.n_elem);
    loo = _leaveOneOut(_theta, &grad, nullptr, nullptr, bench_ptr);
  } else
    loo = _leaveOneOut(_theta, nullptr, nullptr, nullptr, bench_ptr);

  if (_bench) {
    // Align the timing table on the longest label.
    size_t num = 0;
    for (auto& kv : bench)
      num = std::max(kv.first.size(), num);
    for (auto& kv : bench)
      arma::cout << "| " << Bench::pad(kv.first, num, ' ') << " | " << kv.second << " |" << arma::endl;
  }

  return std::make_tuple(loo, std::move(grad));
}
495

496
// LOO predictions and standard deviations at `_theta`, per training point.
// _leaveOneOut fills yhat with (prediction, normalized stdev); its scalar
// return value is not needed here (the previous version stored it in an
// unused local).
LIBKRIGING_EXPORT std::tuple<arma::vec, arma::vec> Kriging::leaveOneOutVec(const arma::vec& _theta) {
  arma::mat yhat = arma::mat(m_y.n_elem, 2, arma::fill::none);
  _leaveOneOut(_theta, nullptr, &yhat, nullptr, nullptr);

  // Column 1 holds a normalized stdev; rescale by the process stdev.
  return std::make_tuple(std::move(yhat.col(0)), std::move(yhat.col(1) * std::sqrt(m_sigma2)));
}
503

504
// Objective function for fit: bayesian-like approach fromm RobustGaSP
505

506
// Log marginal posterior at length-scales `_theta`, following the RobustGaSP
// reference implementation quoted below: marginal likelihood (with beta and,
// when m_est_sigma2, sigma2 integrated/concentrated out) plus an approximate
// reference prior on theta. Optionally fills `grad_out` (analytic when
// m_est_sigma2, finite-difference otherwise), `model`, and `bench` timings.
double Kriging::_logMargPost(const arma::vec& _theta,
                             arma::vec* grad_out,
                             Kriging::KModel* model,
                             std::map<std::string, double>* bench) const {
  // arma::cout << " theta: " << _theta << arma::endl;

  // In RobustGaSP:
  // neg_log_marginal_post_approx_ref <- function(param,nugget,
  // nugget.est,R0,X,zero_mean,output,CL,a,b,kernel_type,alpha) {
  //  lml=log_marginal_lik(param,nugget,nugget.est,R0,X,zero_mean,output,kernel_type,alpha);
  //  lp=log_approx_ref_prior(param,nugget,nugget.est,CL,a,b);
  //  -(lml+lp)
  //}
  // double log_marginal_lik(const Vec param,double nugget, const bool nugget_est, const List R0, const
  // Eigen::Map<Eigen::MatrixXd>  &X,const String zero_mean,const Eigen::Map<Eigen::MatrixXd>  &output, Eigen::VectorXi
  // kernel_type,const Eigen::VectorXd alpha ){
  //  double nu=nugget;
  //  int param_size=param.size();
  //  Eigen::VectorXd beta= param.array().exp().matrix();
  //  ...beta
  //  R=R+nu*MatrixXd::Identity(num_obs,num_obs);  //not sure
  //
  //  LLT<MatrixXd> lltOfR(R);             // compute the cholesky decomposition of R called lltofR
  //  MatrixXd L = lltOfR.matrixL();   //retrieve factor L  in the decomposition
  //
  //  if(zero_mean=="Yes"){...}else{
  //
  //  int q=X.cols();
  //
  //  MatrixXd Rinv_X=L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(X)); //one forward
  //  and one backward to compute R.inv%*%X MatrixXd Xt_Rinv_X=X.transpose()*Rinv_X; //Xt%*%R.inv%*%X
  //
  //  LLT<MatrixXd> lltOfXRinvX(Xt_Rinv_X); // cholesky decomposition of Xt_Rinv_X called lltOfXRinvX
  //  MatrixXd LX = lltOfXRinvX.matrixL();  //  retrieve factor LX  in the decomposition
  //  MatrixXd Rinv_X_Xt_Rinv_X_inv_Xt_Rinv=
  //  Rinv_X*(LX.transpose().triangularView<Upper>().solve(LX.triangularView<Lower>().solve(Rinv_X.transpose())));
  //  //compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward and one backward solve MatrixXd yt_Rinv=
  //  (L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(output))).transpose(); MatrixXd S_2=
  //  (yt_Rinv*output-output.transpose()*Rinv_X_Xt_Rinv_X_inv_Xt_Rinv*output); double log_S_2=log(S_2(0,0)); return
  //  (-(L.diagonal().array().log().matrix().sum())-(LX.diagonal().array().log().matrix().sum())-(num_obs-q)/2.0*log_S_2);
  //  }
  //}
  // double log_approx_ref_prior(const Vec param,double nugget, bool nugget_est, const Eigen::VectorXd CL,const double
  // a,const double b ){
  //  double nu=nugget;
  //  int param_size=param.size();beta
  //  Eigen::VectorX beta= param.array().exp().matrix();
  //  ...
  //  double t=CL.cwiseProduct(beta).sum()+nu;
  //  return -b*t + a*log(t);
  //}

  Kriging::KModel m = make_Model(_theta, bench);
  if (model != nullptr)
    *model = m;

  arma::uword n = m_X.n_rows;
  arma::uword d = m_X.n_cols;
  arma::uword p = m_F.n_cols;

  // RobustGaSP naming...
  // arma::mat X = m_F;
  // arma::mat L = fd->T;

  auto t0 = Bench::tic();
  // m.Fstar : fd->M = solve(L, X, LinearAlgebra::default_solve_opts);

  // arma::mat Rinv_X = solve(trans(L), fd->M, LinearAlgebra::default_solve_opts);
  arma::mat Rinv_X = LinearAlgebra::solve(m.L.t(), m.Fstar);

  // arma::mat Xt_Rinv_X = trans(X) * Rinv_X;  // Xt%*%R.inv%*%X
  arma::mat Xt_Rinv_X = m_F.t() * Rinv_X;

  // arma::mat LX = chol(Xt_Rinv_X, "lower");  //  retrieve factor LX  in the decomposition
  arma::mat LX = LinearAlgebra::safe_chol_lower(Xt_Rinv_X);

  // arma::mat Rinv_X_Xt_Rinv_X_inv_Xt_Rinv
  //     = Rinv_X
  //       * (solve(trans(LX),
  //                solve(LX, trans(Rinv_X), LinearAlgebra::default_solve_opts),
  //                LinearAlgebra::default_solve_opts));  // compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward
  arma::mat Rinv_X_Xt_Rinv_X_inv_Xt_Rinv
      = Rinv_X
        * (LinearAlgebra::solve(
            trans(LX),
            LinearAlgebra::solve(LX, trans(Rinv_X))));  // compute  Rinv_X_Xt_Rinv_X_inv_Xt_Rinv through one forward

  arma::mat yt_Rinv = trans(solve(trans(m.L), m.ystar));
  t0 = Bench::toc(bench, "YtRi = Yt \\ Tt", t0);

  arma::mat S_2 = (yt_Rinv * m_y - trans(m_y) * Rinv_X_Xt_Rinv_X_inv_Xt_Rinv * m_y);
  t0 = Bench::toc(bench, "S2 = YtRi * y - yt * RiFFtRiFiFtRi * y", t0);

  double sigma2;
  if (m_est_sigma2) {
    sigma2 = S_2(0, 0) / (n - p);
  } else {
    sigma2 = m_sigma2;
  }
  // log_S_2 is expressed via sigma2 so both branches share the same formula.
  double log_S_2 = log(sigma2 * (n - p));

  double log_marginal_lik = -sum(log(m.L.diag())) - sum(log(LX.diag())) - (n - p) / 2.0 * log_S_2;
  t0 = Bench::toc(bench, "lml = -Sum(log(diag(T))) - Sum(log(diag(TF)))...", t0);
  // arma::cout << " log_marginal_lik:" << log_marginal_lik << arma::endl;

  // Default prior params
  double a = 0.2;
  double b = 1.0 / pow(n, 1.0 / d) * (a + d);
  // t0 = Bench::toc(bench,"b             ", t0);

  arma::vec CL = trans(max(m_X, 0) - min(m_X, 0)) / pow(n, 1.0 / d);
  t0 = Bench::toc(bench, "CL = (max(X) - min(X)) / n^1/d", t0);

  double t = arma::accu(CL / _theta);
  // arma::cout << " a:" << a << arma::endl;
  // arma::cout << " b:" << b << arma::endl;
  // arma::cout << " t:" << t << arma::endl;

  double log_approx_ref_prior = -b * t + a * log(t);
  // arma::cout << " log_approx_ref_prior:" << log_approx_ref_prior << arma::endl;

  if (grad_out != nullptr) {
    // Eigen::VectorXd log_marginal_lik_deriv(const Eigen::VectorXd param,double nugget,  bool nugget_est, const List
    // R0, const Eigen::Map<Eigen::MatrixXd>  &X,const String zero_mean,const Eigen::Map<Eigen::MatrixXd>  &output,
    // Eigen::VectorXi kernel_type,const Eigen::VectorXd alpha){
    // ...
    // VectorXd ans=VectorXd::Ones(param_size);
    // ...
    // MatrixXd Q_output= yt_Rinv.transpose()-Rinv_X_Xt_Rinv_X_inv_Xt_Rinv*output;
    // MatrixXd dev_R_i;
    // MatrixXd Wb_ti;
    // //allow different choices of kernels
    //
    // for(int ti=0;ti<p;ti++){
    //   //kernel_type_ti=kernel_type[ti];
    //   if(kernel_type[ti]==3){
    //     dev_R_i=matern_5_2_deriv( R0[ti],R_ori,beta[ti]);  //now here I have R_ori instead of R
    //   }else {...}
    //   Wb_ti=(L.transpose().triangularView<Upper>().solve(L.triangularView<Lower>().solve(dev_R_i))).transpose()-dev_R_i*Rinv_X_Xt_Rinv_X_inv_Xt_Rinv;
    //   ans[ti]=-0.5*Wb_ti.diagonal().sum()+(num_obs-q)/2.0*(output.transpose()*Wb_ti.transpose()*Q_output/S_2(0,0))(0,0);
    // }

    if (m_est_sigma2) {
      // Analytic gradient, following RobustGaSP's log_marginal_lik_deriv above.
      t0 = Bench::tic();
      arma::vec ans = arma::vec(d, arma::fill::none);
      arma::mat Q_output = trans(yt_Rinv) - Rinv_X_Xt_Rinv_X_inv_Xt_Rinv * m_y;
      t0 = Bench::toc(bench, "Qo = YtRi - RiFFtRiFiFtRi * y", t0);

      // dR/dtheta_k; symmetric with zero diagonal (cube starts zero-filled).
      arma::cube gradR = arma::cube(n, n, d, arma::fill::zeros);
      // const arma::vec zeros = arma::vec(d,arma::fill::zeros);
      for (arma::uword i = 0; i < n; i++) {
        // gradR.tube(i, i) = zeros;
        for (arma::uword j = 0; j < i; j++) {
          gradR.tube(i, j) = m.R.at(i, j) * _DlnCovDtheta(m_dX.col(i * n + j), _theta);
          gradR.tube(j, i) = gradR.tube(i, j);
        }
      }
      t0 = Bench::toc(bench, "gradR = R * dlnCov(dX)", t0);

      arma::mat Wb_k;
      for (arma::uword k = 0; k < d; k++) {
        t0 = Bench::tic();
        arma::mat gradR_k = gradR.slice(k);
        t0 = Bench::toc(bench, "gradR_k = gradR[k]", t0);

        Wb_k = trans(LinearAlgebra::solve(trans(m.L), LinearAlgebra::solve(m.L, gradR_k)))
               - gradR_k * Rinv_X_Xt_Rinv_X_inv_Xt_Rinv;
        t0 = Bench::toc(bench, "Wb_k = gradR_k \\ L \\ Tt - gradR_k * RiFFtRiFiFtRi", t0);

        ans[k] = -sum(Wb_k.diag()) / 2.0 + as_scalar(trans(m_y) * trans(Wb_k) * Q_output) / (2.0 * sigma2);
        t0 = Bench::toc(bench, "ans[k] = Sum(diag(Wb_k)) + yt * Wb_kt * Qo / S2...", t0);
      }
      // arma::cout << " log_marginal_lik_deriv:" << ans << arma::endl;
      // arma::cout << " log_approx_ref_prior_deriv:" <<  - (a * CL / t - b * CL) / pow(_theta, 2.0) << arma::endl;

      // Total gradient: marginal-likelihood part plus prior part.
      *grad_out = ans - (a * CL / t - b * CL) / square(_theta);
      // t0 = Bench::toc(bench," grad_out     ", t0);
    } else {  // TODO: we do not have (yet) formula when sigma2 is fixed... :(
      // Fallback: one-sided finite differences, one extra evaluation per dim.
      *grad_out = arma::vec(d, arma::fill::zeros);
      double _eps = 1e-6;
      for (arma::uword k = 0; k < d; k++) {
        arma::vec theta_eps = _theta;
        theta_eps[k] += _eps;
        (*grad_out)[k]
            = (_logMargPost(theta_eps, nullptr, nullptr, nullptr) - (log_marginal_lik + log_approx_ref_prior)) / _eps;
      }
    }
    // arma::cout << " grad_out:" << *grad_out << arma::endl;
  }

  // arma::cout << " lmp:" << (log_marginal_lik+log_approx_ref_prior) << arma::endl;
  return (log_marginal_lik + log_approx_ref_prior);
}
699

700
// Public wrapper around _logMargPost: returns (lmp, grad) at `_theta`; grad is
// left empty unless _grad is true. When _bench is true, per-step timings are
// collected and printed aligned on arma::cout.
LIBKRIGING_EXPORT std::tuple<double, arma::vec> Kriging::logMargPostFun(const arma::vec& _theta,
                                                                        const bool _grad,
                                                                        const bool _bench) {
  double lmp = -1;
  arma::vec grad;

  // The previous version duplicated the _grad dispatch in both the bench and
  // no-bench branches; a single path with an optional bench map is equivalent.
  std::map<std::string, double> bench;
  std::map<std::string, double>* bench_ptr = _bench ? &bench : nullptr;

  if (_grad) {
    grad = arma::vec(_theta.n_elem);
    lmp = _logMargPost(_theta, &grad, nullptr, bench_ptr);
  } else
    lmp = _logMargPost(_theta, nullptr, nullptr, bench_ptr);

  if (_bench) {
    // Align the timing table on the longest label.
    size_t num = 0;
    for (auto& kv : bench)
      num = std::max(kv.first.size(), num);
    for (auto& kv : bench) {
      arma::cout << "| " << Bench::pad(kv.first, num, ' ') << " | " << kv.second << " |" << arma::endl;
    }
  }

  return std::make_tuple(lmp, std::move(grad));
}
731

732
// Log-likelihood at the fitted hyper-parameters m_theta (no grad/hess/bench).
LIBKRIGING_EXPORT double Kriging::logLikelihood() {
  return std::get<0>(Kriging::logLikelihoodFun(m_theta, false, false, false));
}
735

736
// Leave-one-out criterion at the fitted hyper-parameters m_theta.
LIBKRIGING_EXPORT double Kriging::leaveOneOut() {
  return std::get<0>(Kriging::leaveOneOutFun(m_theta, false, false));
}
739

740
// Log marginal posterior at the fitted hyper-parameters m_theta.
LIBKRIGING_EXPORT double Kriging::logMargPost() {
  return std::get<0>(Kriging::logMargPostFun(m_theta, false, false));
}
743

744
double optim_newton(std::function<double(arma::vec& x, arma::vec* grad_out, arma::mat* hess_out)> f,
×
745
                    arma::vec& x_0,
746
                    const arma::vec& x_lower,
747
                    const arma::vec& x_upper) {
748
  if (Optim::log_level > 0) {
×
749
    arma::cout << "x_0: " << x_0 << " ";
×
750
  }
751

752
  double delta = 0.1;
×
753
  double x_tol = 0.01;  // Optim::solution_rel_tolerance;
×
754
  double y_tol = Optim::objective_rel_tolerance;
×
755
  double g_tol = Optim::gradient_tolerance;
×
756
  int max_iteration = Optim::max_iteration;
×
757

758
  arma::vec x_previous(x_0.n_elem);
×
759
  arma::vec x_best(x_0.n_elem);
×
760
  double f_previous = std::numeric_limits<double>::infinity();
×
761
  double f_new = std::numeric_limits<double>::infinity();
×
762
  double f_best = std::numeric_limits<double>::infinity();
×
763

764
  arma::vec x = x_0;
×
765
  arma::vec grad(x.n_elem);
×
766
  arma::mat hess(x.n_elem, x.n_elem);
×
767
  int i = 0;
×
768
  while (i < max_iteration) {
×
769
    if (Optim::log_level > 0) {
×
770
      arma::cout << "iteration: " << i << arma::endl;
×
771
      arma::cout << "  x: " << x << arma::endl;
×
772
    }
773

774
    f_previous = f_new;
×
775
    f_new = f(x, &grad, &hess);
×
776
    if (Optim::log_level > 1) {
×
777
      arma::cout << "    f_new: " << f_new << arma::endl;
×
778
    }
779
    if (f_best > f_new) {
×
780
      f_best = f_new;
×
781
      x_best = x;
×
782
    }
783

784
    if (std::abs((f_new - f_previous) / f_previous) < y_tol) {
×
785
      if (Optim::log_level > 0) {
×
786
        arma::cout << "  X f_new ~ f_previous" << arma::endl;
×
787
      }
788
      break;
×
789
    }
790
    if (arma::abs(grad).max() < g_tol) {
×
791
      if (Optim::log_level > 0) {
×
792
        arma::cout << "  X grad ~ 0" << arma::endl;
×
793
      }
794
      break;
×
795
    }
796

797
    arma::vec delta_x(x.n_elem);
×
798
    if (Optim::log_level > 2) {
×
799
      arma::cout << "  eig(hess)" << arma::eig_sym(hess) << arma::endl;
×
800
    }
801
    if (arma::all(arma::eig_sym(hess) > 0)) {
×
802
      // try to fit a second order polynom to use its minimizer. Otherwise, just iterate with conjugate gradient
803
      // arma::cout << "!";
804
      delta_x = arma::solve(hess, grad, arma::solve_opts::likely_sympd);
×
805
    } else {
806
      delta_x = delta * grad / std::sqrt(arma::sum(arma::square(grad)));
×
807
    }
808
    if (Optim::log_level > 2) {
×
809
      arma::cout << "  delta_x: " << delta_x << arma::endl;
×
810
    }
811

812
    arma::vec x_next = x - delta_x;
×
813
    if (Optim::log_level > 1) {
×
814
      arma::cout << "  x_next: " << x_next << arma::endl;
×
815
    }
816

817
    for (arma::uword j = 0; j < x_next.n_elem; j++) {
×
818
      if (x_next[j] < x_lower[j]) {
×
819
        if (Optim::log_level > 2) {
×
820
          arma::cout << "    <" << x_lower[j] << arma::endl;
×
821
        }
822
        delta_x = delta_x * (x[j] - x_lower[j]) / (x[j] - x_next[j]) / 2;
×
823
        x_next = x - delta_x;
×
824
      }
825
      if (x_next[j] > x_upper[j]) {
×
826
        if (Optim::log_level > 2) {
×
827
          arma::cout << "    >" << x_upper[j] << arma::endl;
×
828
        }
829
        delta_x = delta_x * (x_upper[j] - x[j]) / (x_next[j] - x[j]) / 2;
×
830
        x_next = x - delta_x;
×
831
      }
832
    }
833
    if (Optim::log_level > 2) {
×
834
      arma::cout << "    delta_x: " << delta << arma::endl;
×
835
    }
836
    if (Optim::log_level > 2) {
×
837
      arma::cout << "    x_next: " << x_next << arma::endl;
×
838
    }
839

840
    if (arma::abs((x - x_next) / x).max() < x_tol) {
×
841
      if (Optim::log_level > 1) {
×
842
        arma::cout << "  X x_0 ~ x_next" << arma::endl;
×
843
      }
844
      break;
×
845
    }
846

847
    x_previous = x;
×
848
    x = x_next;
×
849

850
    // TODO : keep best result instead of last one
851
    ++i;
×
852
    if (Optim::log_level > 0) {
×
853
      arma::cout << "  f_best: " << f_best << arma::endl;
×
854
    }
855
  }
×
856

857
  x_0 = x_best;
×
858
  if (Optim::log_level > 0) {
×
859
    arma::cout << " " << x_0 << " " << f_best << arma::endl;
×
860
  }
861

862
  return f_best;
×
863
}
×
864

865
/** Fit the kriging object on (X,y):
866
 * @param y is n length column vector of output
867
 * @param X is n*d matrix of input
868
 * @param regmodel is the regression model to be used for the GP mean (choice between contant, linear, quadratic)
869
 * @param normalize is a boolean to enforce inputs/output normalization
870
 * @param optim is an optimizer name from OptimLib, or 'none' to keep parameters unchanged
871
 * @param objective is 'LOO' or 'LL'. Ignored if optim=='none'.
872
 * @param parameters starting values for hyper-parameters for optim, or final values if optim=='none'.
873
 */
874
LIBKRIGING_EXPORT void Kriging::fit(const arma::vec& y,
89✔
875
                                    const arma::mat& X,
876
                                    const Trend::RegressionModel& regmodel,
877
                                    bool normalize,
878
                                    const std::string& optim,
879
                                    const std::string& objective,
880
                                    const Parameters& parameters) {
881
  const arma::uword n = X.n_rows;
89✔
882
  const arma::uword d = X.n_cols;
89✔
883

884
  std::function<double(const arma::vec& _gamma, arma::vec* grad_out, arma::mat* hess_out, Kriging::KModel* km_data)>
885
      fit_ofn;
89✔
886
  m_optim = optim;
89✔
887
  m_objective = objective;
89✔
888
  if (objective.compare("LL") == 0) {
89✔
889
    if (Optim::reparametrize) {
89✔
890
      fit_ofn = CacheFunction(
178✔
891
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* hess_out, Kriging::KModel* km_data) {
9,612✔
892
            // Change variable for opt: . -> 1/exp(.)
893
            // DEBUG: if (Optim::log_level>3) arma::cout << "> gamma: " << _gamma << arma::endl;
894
            const arma::vec _theta = Optim::reparam_from(_gamma);
4,806✔
895
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
896
            double ll = this->_logLikelihood(_theta, grad_out, hess_out, km_data, nullptr);
4,806✔
897
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > ll: " << ll << arma::endl;
898
            if (grad_out != nullptr) {
4,806✔
899
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad ll: " << grad_out << arma::endl;
900
              *grad_out = -Optim::reparam_from_deriv(_theta, *grad_out);
9,434✔
901
            }
902
            if (hess_out != nullptr) {
4,806✔
903
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > hess ll: " << hess_out << arma::endl;
904
              *hess_out = -Optim::reparam_from_deriv2(_theta, *grad_out, *hess_out);
×
905
            }
906
            return -ll;
4,806✔
907
          });
4,895✔
908
    } else {
909
      fit_ofn = CacheFunction(
×
910
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* hess_out, Kriging::KModel* km_data) {
×
911
            const arma::vec _theta = _gamma;
×
912
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
913
            double ll = this->_logLikelihood(_theta, grad_out, hess_out, km_data, nullptr);
×
914
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > ll: " << ll << arma::endl;
915
            if (grad_out != nullptr) {
×
916
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad ll: " << grad_out << arma::endl;
917
              *grad_out = -*grad_out;
×
918
            }
919
            if (hess_out != nullptr) {
×
920
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > hess ll: " << hess_out << arma::endl;
921
              *hess_out = -*hess_out;
×
922
            }
923
            return -ll;
×
924
          });
×
925
    }
926
  } else if (objective.compare("LOO") == 0) {
×
927
    if (Optim::reparametrize) {
×
928
      fit_ofn = CacheFunction(
×
929
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* /*hess_out*/, Kriging::KModel* km_data) {
×
930
            // Change variable for opt: . -> 1/exp(.)
931
            // DEBUG: if (Optim::log_level>3) arma::cout << "> gamma: " << _gamma << arma::endl;
932
            const arma::vec _theta = Optim::reparam_from(_gamma);
×
933
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
934
            double loo = this->_leaveOneOut(_theta, grad_out, nullptr, km_data, nullptr);
×
935
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > loo: " << loo << arma::endl;
936
            if (grad_out != nullptr) {
×
937
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad ll: " << grad_out << arma::endl;
938
              *grad_out = Optim::reparam_from_deriv(_theta, *grad_out);
×
939
            }
940
            return loo;
×
941
          });
×
942
    } else {
943
      fit_ofn = CacheFunction(
×
944
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* /*hess_out*/, Kriging::KModel* km_data) {
×
945
            const arma::vec _theta = _gamma;
×
946
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
947
            double loo = this->_leaveOneOut(_theta, grad_out, nullptr, km_data, nullptr);
×
948
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > loo: " << loo << arma::endl;
949
            // if (grad_out != nullptr) {
950
            //   if (Optim::log_level>3) arma::cout << "  > grad ll: " << grad_out << arma::endl;
951
            //   *grad_out = *grad_out; // so not necessary
952
            //  }
953
            return loo;
×
954
          });
×
955
    }
956
  } else if (objective.compare("LMP") == 0) {
×
957
    // Our impl. of https://github.com/cran/RobustGaSP/blob/5cf21658e6a6e327be6779482b93dfee25d24592/R/rgasp.R#L303
958
    //@see Mengyang Gu, Xiao-jing Wang and Jim Berger, 2018, Annals of Statistics.
959
    if (Optim::reparametrize) {
×
960
      fit_ofn = CacheFunction(
×
961
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* /*hess_out*/, Kriging::KModel* km_data) {
×
962
            // Change variable for opt: . -> 1/exp(.)
963
            // DEBUG: if (Optim::log_level>3) arma::cout << "> gamma: " << _gamma << arma::endl;
964
            const arma::vec _theta = Optim::reparam_from(_gamma);
×
965
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
966
            double lmp = this->_logMargPost(_theta, grad_out, km_data, nullptr);
×
967
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > lmp: " << lmp << arma::endl;
968
            if (grad_out != nullptr) {
×
969
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad lmp: " << grad_out << arma::endl;
970
              *grad_out = -Optim::reparam_from_deriv(_theta, *grad_out);
×
971
            }
972
            return -lmp;
×
973
          });
×
974
    } else {
975
      fit_ofn = CacheFunction(
×
976
          [this](const arma::vec& _gamma, arma::vec* grad_out, arma::mat* /*hess_out*/, Kriging::KModel* km_data) {
×
977
            const arma::vec _theta = _gamma;
×
978
            // DEBUG: if (Optim::log_level>3) arma::cout << "> theta: " << _theta << arma::endl;
979
            double lmp = this->_logMargPost(_theta, grad_out, km_data, nullptr);
×
980
            // DEBUG: if (Optim::log_level>3) arma::cout << "  > lmp: " << lmp << arma::endl;
981
            if (grad_out != nullptr) {
×
982
              // DEBUG: if (Optim::log_level>3) arma::cout << "  > grad lmp: " << grad_out << arma::endl;
983
              *grad_out = -*grad_out;
×
984
            }
985
            return -lmp;
×
986
          });
×
987
    }
988
  } else
989
    throw std::invalid_argument("Unsupported fit objective: " + objective + " (supported are: LL, LOO, LMP)");
×
990

991
  arma::rowvec centerX;
89✔
992
  arma::rowvec scaleX;
89✔
993
  double centerY;
994
  double scaleY;
995
  // Normalization of inputs and output
996
  m_normalize = normalize;
89✔
997
  if (m_normalize) {
89✔
998
    centerX = min(X, 0);
×
999
    scaleX = max(X, 0) - min(X, 0);
×
1000
    centerY = min(y);
×
1001
    scaleY = max(y) - min(y);
×
1002
  } else {
1003
    centerX = arma::rowvec(d, arma::fill::zeros);
89✔
1004
    scaleX = arma::rowvec(d, arma::fill::ones);
89✔
1005
    centerY = 0;
89✔
1006
    scaleY = 1;
89✔
1007
  }
1008
  m_centerX = centerX;
89✔
1009
  m_scaleX = scaleX;
89✔
1010
  m_centerY = centerY;
89✔
1011
  m_scaleY = scaleY;
89✔
1012
  {  // FIXME why copies of newX and newy
1013
    arma::mat newX = X;
89✔
1014
    newX.each_row() -= centerX;
89✔
1015
    newX.each_row() /= scaleX;
89✔
1016
    arma::vec newy = (y - centerY) / scaleY;
178✔
1017
    this->m_X = newX;
89✔
1018
    this->m_y = newy;
89✔
1019
  }
89✔
1020

1021
  // Now we compute the distance matrix between points. Will be used to compute R(theta) later (e.g. when fitting)
1022
  // Note: m_dX is transposed compared to m_X
1023
  m_dX = arma::mat(d, n * n, arma::fill::zeros);
89✔
1024
  for (arma::uword ij = 0; ij < m_dX.n_cols; ij++) {
206,265✔
1025
    int i = (int)ij / n;
206,176✔
1026
    int j = ij % n;  // i,j <-> i*n+j
206,176✔
1027
    if (i < j) {
206,176✔
1028
      m_dX.col(ij) = trans(m_X.row(i) - m_X.row(j));
405,876✔
1029
      m_dX.col(j * n + i) = m_dX.col(ij);
304,407✔
1030
    }
1031
  }
1032
  m_maxdX = arma::max(arma::abs(m_dX), 1);
178✔
1033

1034
  // Define regression matrix
1035
  m_regmodel = regmodel;
89✔
1036
  m_F = Trend::regressionModelMatrix(regmodel, m_X);
89✔
1037
  m_est_beta = parameters.is_beta_estim && (m_regmodel != Trend::RegressionModel::None);
89✔
1038
  if (!m_est_beta && parameters.beta.has_value()
×
1039
      && parameters.beta.value().n_elem > 0) {  // Then force beta to be fixed (not estimated, no variance)
89✔
1040
    m_est_beta = false;
×
1041
    m_beta = parameters.beta.value();
×
1042
    if (m_normalize)
×
1043
      m_beta /= scaleY;
×
1044
  } else
1045
    m_est_beta = true;
89✔
1046

1047
  arma::mat theta0;
89✔
1048
  if (parameters.theta.has_value()) {
89✔
1049
    theta0 = parameters.theta.value();
3✔
1050
    if ((theta0.n_cols != d) && (theta0.n_rows == d))
3✔
1051
      theta0 = theta0.t();
×
1052
    if (m_normalize)
3✔
1053
      theta0.each_row() /= scaleX;
×
1054
    if (theta0.n_cols != d)
3✔
1055
      throw std::runtime_error("Dimension of theta should be nx" + std::to_string(d) + " instead of "
×
1056
                               + std::to_string(theta0.n_rows) + "x" + std::to_string(theta0.n_cols));
×
1057
  }
1058

1059
  if (optim == "none") {  // just keep given theta, no optimisation of ll (but estim sigma2  &beta still possible)
89✔
1060
    if (!parameters.theta.has_value())
×
1061
      throw std::runtime_error("Theta should be given (1x" + std::to_string(d) + ") matrix, when optim=none");
×
1062

1063
    m_theta = trans(theta0.row(0));
×
1064
    m_est_theta = false;
×
1065

1066
    double sigma2 = -1;
×
1067
    m_est_sigma2 = parameters.is_sigma2_estim;
×
1068
    if (parameters.sigma2.has_value()) {
×
1069
      sigma2 = parameters.sigma2.value();  // otherwise sigma2 will be re-calculated using given theta
×
1070
      if (m_normalize)
×
1071
        sigma2 /= (scaleY * scaleY);
×
1072
    } else
1073
      m_est_sigma2 = true;
×
1074

1075
    Kriging::KModel m = make_Model(m_theta, nullptr);
×
1076
    m_is_empty = false;
×
1077
    m_T = std::move(m.L);
×
1078
    m_R = std::move(m.R);
×
1079
    m_M = std::move(m.Fstar);
×
1080
    m_circ = std::move(m.Rstar);
×
1081
    m_star = std::move(m.Qstar);
×
1082
    if (m_est_beta) {
×
1083
      m_beta = std::move(m.betahat);
×
1084
      m_z = std::move(m.Estar);
×
1085
    } else {
1086
      // m_beta = parameters.beta.value(); already done above
1087
      m_z = std::move(m.ystar) - m_M * m_beta;
×
1088
    }
1089
    if (m_est_sigma2) {
×
1090
      m_sigma2 = m.SSEstar / n;
×
1091
    } else {
1092
      m_sigma2 = sigma2;
×
1093
    }
1094

1095
  } else {
×
1096
    arma::vec theta_lower = Optim::theta_lower_factor * m_maxdX;
178✔
1097
    arma::vec theta_upper = Optim::theta_upper_factor * m_maxdX;
178✔
1098

1099
    if (Optim::variogram_bounds_heuristic) {
89✔
1100
      arma::vec dy2 = arma::vec(n * n, arma::fill::zeros);
89✔
1101
      for (arma::uword ij = 0; ij < dy2.n_elem; ij++) {
206,265✔
1102
        int i = (int)ij / n;
206,176✔
1103
        int j = ij % n;  // i,j <-> i*n+j
206,176✔
1104
        if (i < j) {
206,176✔
1105
          dy2[ij] = m_y.at(i) - m_y.at(j);
405,876✔
1106
          dy2[ij] *= dy2[ij];
202,938✔
1107
          dy2[j * n + i] = dy2[ij];
202,938✔
1108
        }
1109
      }
1110
      // dy2 /= arma::var(m_y);
1111
      arma::vec dy2dX2_slope = dy2 / arma::sum(m_dX % m_dX, 0).t();
267✔
1112
      // arma::cout << "dy2dX_slope:" << dy2dX_slope << arma::endl;
1113
      dy2dX2_slope.replace(arma::datum::nan, 0.0);  // we are not interested in same points where dX=0, and dy=0
89✔
1114
      arma::vec w = dy2dX2_slope / sum(dy2dX2_slope);
178✔
1115
      arma::mat steepest_dX_mean = arma::abs(m_dX) * w;
267✔
1116
      // arma::cout << "steepest_dX_mean:" << steepest_dX_mean << arma::endl;
1117

1118
      theta_lower = arma::max(theta_lower, Optim::theta_lower_factor * steepest_dX_mean);
178✔
1119
      // no, only relevant for inf bound: theta_upper = arma::min(theta_upper, Optim::theta_upper_factor *
1120
      // steepest_dX_mean);
1121
      theta_lower = arma::min(theta_lower, theta_upper);
89✔
1122
      theta_upper = arma::max(theta_lower, theta_upper);
89✔
1123
    }
89✔
1124
    // arma::cout << "theta_lower:" << theta_lower << arma::endl;
1125
    // arma::cout << "theta_upper:" << theta_upper << arma::endl;
1126

1127
    if (optim.rfind("BFGS", 0) == 0) {
89✔
1128
      Random::init();
89✔
1129

1130
      int multistart = 1;
89✔
1131
      try {
1132
        multistart = std::stoi(optim.substr(4));
178✔
1133
      } catch (std::invalid_argument&) {
89✔
1134
        // let multistart = 1
1135
      }
89✔
1136

1137
      arma::mat theta0_rand
1138
          = arma::repmat(trans(theta_lower), multistart, 1)
178✔
1139
            + Random::randu_mat(multistart, d) % arma::repmat(trans(theta_upper - theta_lower), multistart, 1);
445✔
1140
      // theta0 = arma::abs(0.5 + Random::randn_mat(multistart, d) / 6.0)
1141
      //          % arma::repmat(max(m_X, 0) - min(m_X, 0), multistart, 1);
1142

1143
      if (parameters.theta.has_value()) {  // just use given theta(s) as starting values for multi-bfgs
89✔
1144
        multistart = std::max(multistart, (int)theta0.n_rows);
3✔
1145
        theta0 = arma::join_cols(theta0, theta0_rand);  // append random starting points to given ones
3✔
1146
        theta0.resize(multistart, theta0.n_cols);       // keep only multistart first rows
3✔
1147
      } else {
1148
        theta0 = theta0_rand;
86✔
1149
      }
1150
      // arma::cout << "theta0:" << theta0 << arma::endl;
1151

1152
      arma::vec gamma_lower = theta_lower;
89✔
1153
      arma::vec gamma_upper = theta_upper;
89✔
1154
      if (Optim::reparametrize) {
89✔
1155
        gamma_lower = Optim::reparam_to(theta_upper);
89✔
1156
        gamma_upper = Optim::reparam_to(theta_lower);
89✔
1157
      }
1158

1159
      double min_ofn = std::numeric_limits<double>::infinity();
89✔
1160

1161
      for (arma::uword i = 0; i < multistart; i++) {  // TODO: use some foreach/pragma to let OpenMP work.
178✔
1162
        arma::vec gamma_tmp = theta0.row(i % multistart).t();
267✔
1163
        if (Optim::reparametrize)
89✔
1164
          gamma_tmp = Optim::reparam_to(theta0.row(i % multistart).t());
178✔
1165

1166
        gamma_lower = arma::min(gamma_tmp, gamma_lower);
89✔
1167
        gamma_upper = arma::max(gamma_tmp, gamma_upper);
89✔
1168

1169
        if (Optim::log_level > 0) {
89✔
1170
          arma::cout << "BFGS:" << arma::endl;
×
1171
          arma::cout << "  max iterations: " << Optim::max_iteration << arma::endl;
×
1172
          arma::cout << "  null gradient tolerance: " << Optim::gradient_tolerance << arma::endl;
×
1173
          arma::cout << "  constant objective tolerance: " << Optim::objective_rel_tolerance << arma::endl;
×
1174
          arma::cout << "  reparametrize: " << Optim::reparametrize << arma::endl;
×
1175
          arma::cout << "  normalize: " << m_normalize << arma::endl;
×
1176
          arma::cout << "  lower_bounds: " << theta_lower.t() << " ";
×
1177
          arma::cout << "  upper_bounds: " << theta_upper.t() << " ";
×
1178
          arma::cout << "  start_point: " << theta0.row(i % multistart) << " ";
×
1179
        }
1180

1181
        m_est_sigma2 = parameters.is_sigma2_estim;
89✔
1182
        if ((!m_est_sigma2) && (parameters.sigma2.has_value())) {
89✔
1183
          m_sigma2 = parameters.sigma2.value();
×
1184
          if (m_normalize)
×
1185
            m_sigma2 /= (scaleY * scaleY);
×
1186
        } else {
1187
          m_est_sigma2 = true;  // force estim if no value given
89✔
1188
        }
1189

1190
        lbfgsb::Optimizer optimizer{d};
89✔
1191
        optimizer.iprint = Optim::log_level - 2;
89✔
1192
        optimizer.max_iter = Optim::max_iteration;
89✔
1193
        optimizer.pgtol = Optim::gradient_tolerance;
89✔
1194
        optimizer.factr = Optim::objective_rel_tolerance / 1E-13;
89✔
1195
        arma::ivec bounds_type{d, arma::fill::value(2)};  // means both upper  &lower bounds
89✔
1196

1197
        int retry = 0;
89✔
1198
        double best_f_opt = std::numeric_limits<double>::infinity();
89✔
1199
        arma::vec best_gamma = gamma_tmp;
89✔
1200
        Kriging::KModel m = make_Model(theta0.row(i % multistart).t(), nullptr);
267✔
1201

1202
        while (retry <= Optim::max_restart) {
279✔
1203
          arma::vec gamma_0 = gamma_tmp;
279✔
1204
          auto result = optimizer.minimize(
1205
              [&m, &fit_ofn](const arma::vec& vals_inp, arma::vec& grad_out) -> double {
4,717✔
1206
                return fit_ofn(vals_inp, &grad_out, nullptr, &m);
4,717✔
1207
              },
1208
              gamma_tmp,
1209
              gamma_lower.memptr(),
279✔
1210
              gamma_upper.memptr(),
279✔
1211
              bounds_type.memptr());
558✔
1212

1213
          if (Optim::log_level > 0) {
279✔
1214
            arma::cout << "     iterations: " << result.num_iters << arma::endl;
×
1215
            arma::cout << "     status: " << result.task << arma::endl;
×
1216
            if (Optim::reparametrize) {
×
1217
              arma::cout << "     start_point: " << Optim::reparam_from(gamma_0).t() << " ";
×
1218
              arma::cout << "     solution: " << Optim::reparam_from(gamma_tmp).t() << " ";
×
1219
            } else {
1220
              arma::cout << "     start_point: " << gamma_0.t() << " ";
×
1221
              arma::cout << "     solution: " << gamma_tmp.t() << " ";
×
1222
            }
1223
          }
1224

1225
          if (result.f_opt < best_f_opt) {
279✔
1226
            best_f_opt = result.f_opt;
136✔
1227
            best_gamma = gamma_tmp;
136✔
1228
          }
1229

1230
          double sol_to_lb = arma::min(arma::abs(gamma_tmp - gamma_lower));
279✔
1231
          double sol_to_ub = arma::min(arma::abs(gamma_tmp - gamma_upper));
279✔
1232
          double sol_to_b = std::min(sol_to_ub, sol_to_lb);
279✔
1233
          // Optim::reparametrize ? sol_to_ub : sol_to_lb;  // just consider theta lower bound
1234
          if ((retry < Optim::max_restart)
558✔
1235
              && ((result.task.rfind("ABNORMAL_TERMINATION_IN_LNSRCH", 0) == 0)  // error in algorithm
515✔
1236
                  || ((sol_to_b < arma::datum::eps) && (result.num_iters <= 2))  // we are stuck on a bound
236✔
1237
                  || (result.f_opt > best_f_opt)  // maybe still better start point available
120✔
1238
                  )) {
1239
            gamma_tmp = (theta0.row(i % multistart).t() + theta_lower)
190✔
1240
                        / pow(2.0, retry + 1);  // so move starting point to middle-point
380✔
1241

1242
            if (Optim::reparametrize)
190✔
1243
              gamma_tmp = Optim::reparam_to(gamma_tmp);
190✔
1244

1245
            gamma_lower = arma::min(gamma_tmp, gamma_lower);
190✔
1246
            gamma_upper = arma::max(gamma_tmp, gamma_upper);
190✔
1247

1248
            retry++;
190✔
1249
          } else {
1250
            if (Optim::log_level > 1) {
89✔
1251
              result.print();
×
1252
            }
1253
            break;
89✔
1254
          }
1255
        }
368✔
1256

1257
        // last call to ensure that T and z are up-to-date with solution.
1258
        double min_ofn_tmp = fit_ofn(best_gamma, nullptr, nullptr, &m);
89✔
1259

1260
        if (Optim::log_level > 0) {
89✔
1261
          arma::cout << "  best objective: " << min_ofn_tmp << arma::endl;
×
1262
          if (Optim::reparametrize) {
×
1263
            arma::cout << "  best solution: " << Optim::reparam_from(best_gamma).t() << " ";
×
1264
          } else {
1265
            arma::cout << "  best solution: " << best_gamma.t() << " ";
×
1266
          }
1267
        }
1268

1269
        if (min_ofn_tmp < min_ofn) {
89✔
1270
          m_theta = best_gamma;
89✔
1271
          if (Optim::reparametrize)
89✔
1272
            m_theta = Optim::reparam_from(best_gamma);
89✔
1273
          m_est_theta = true;
89✔
1274
          min_ofn = min_ofn_tmp;
89✔
1275

1276
          m_is_empty = false;
89✔
1277
          m_T = std::move(m.L);
89✔
1278
          m_R = std::move(m.R);
89✔
1279
          m_M = std::move(m.Fstar);
89✔
1280
          m_circ = std::move(m.Rstar);
89✔
1281
          m_star = std::move(m.Qstar);
89✔
1282
          if (m_est_beta) {
89✔
1283
            m_beta = std::move(m.betahat);
89✔
1284
            m_z = std::move(m.Estar);
89✔
1285
          } else {
1286
            // m_beta = parameters.beta.value(); already done above
1287
            m_z = std::move(m.ystar) - m_M * m_beta;
×
1288
          }
1289
          if (m_est_sigma2) {
89✔
1290
            m_sigma2 = m.SSEstar / n;
89✔
1291
            if (m_objective.compare("LMP") == 0) {
89✔
1292
              m_sigma2 = m.SSEstar / (n - m_F.n_cols);
×
1293
            }
1294
          }
1295
        }
1296
      }
89✔
1297
    } else if (optim.rfind("Newton", 0) == 0) {
89✔
1298
      Random::init();
×
1299

1300
      int multistart = 1;
×
1301
      try {
1302
        multistart = std::stoi(optim.substr(6));
×
1303
      } catch (std::invalid_argument&) {
×
1304
        // let multistart = 1
1305
      }
×
1306

1307
      theta0 = arma::repmat(trans(theta_lower), multistart, 1)
×
1308
               + Random::randu_mat(multistart, d) % arma::repmat(trans(theta_upper - theta_lower), multistart, 1);
×
1309
      // theta0 = arma::abs(0.5 + Random::randn_mat(multistart, d) / 6.0)
1310
      //          % arma::repmat(max(m_X, 0) - min(m_X, 0), multistart, 1);
1311

1312
      if (parameters.theta.has_value()) {  // just use given theta(s) as starting values for multi-bfgs
×
1313
        arma::mat theta0_tmp = arma::mat(parameters.theta.value());
×
1314
        if (m_normalize)
×
1315
          theta0_tmp.each_row() /= scaleX;
×
1316
        theta0 = arma::join_cols(theta0, theta0_tmp);
×
1317
      }  // arma::cout << "theta0:" << theta0 << arma::endl;
×
1318

1319
      arma::vec gamma_lower = theta_lower;
×
1320
      arma::vec gamma_upper = theta_upper;
×
1321
      if (Optim::reparametrize) {
×
1322
        gamma_lower = Optim::reparam_to(theta_upper);
×
1323
        gamma_upper = Optim::reparam_to(theta_lower);
×
1324
      }
1325

1326
      double min_ofn = std::numeric_limits<double>::infinity();
×
1327

1328
      for (arma::uword i = 0; i < multistart; i++) {  // TODO: use some foreach/pragma to let OpenMP work.
×
1329
        arma::vec gamma_tmp = theta0.row(i % multistart).t();
×
1330
        if (Optim::reparametrize)
×
1331
          gamma_tmp = Optim::reparam_to(theta0.row(i % multistart).t());
×
1332

1333
        gamma_lower = arma::min(gamma_tmp, gamma_lower);
×
1334
        gamma_upper = arma::max(gamma_tmp, gamma_upper);
×
1335

1336
        if (Optim::log_level > 0) {
×
1337
          arma::cout << "Newton:" << arma::endl;
×
1338
          arma::cout << "  max iterations: " << Optim::max_iteration << arma::endl;
×
1339
          arma::cout << "  null gradient tolerance: " << Optim::gradient_tolerance << arma::endl;
×
1340
          arma::cout << "  constant objective tolerance: " << Optim::objective_rel_tolerance << arma::endl;
×
1341
          arma::cout << "  reparametrize: " << Optim::reparametrize << arma::endl;
×
1342
          arma::cout << "  normalize: " << m_normalize << arma::endl;
×
1343
          arma::cout << "  lower_bounds: " << theta_lower.t() << " ";
×
1344
          arma::cout << "  upper_bounds: " << theta_upper.t() << " ";
×
1345
          arma::cout << "  start_point: " << theta0.row(i % multistart) << " ";
×
1346
        }
1347

1348
        m_est_sigma2 = parameters.is_sigma2_estim;
×
1349
        if ((!m_est_sigma2) && (parameters.sigma2.has_value())) {
×
1350
          m_sigma2 = parameters.sigma2.value();
×
1351
          if (m_normalize)
×
1352
            m_sigma2 /= (scaleY * scaleY);
×
1353
        } else {
1354
          m_est_sigma2 = true;  // force estim if no value given
×
1355
        }
1356

1357
        Kriging::KModel m = make_Model(theta0.row(i % multistart).t(), nullptr);
×
1358
        double min_ofn_tmp = optim_newton(
×
1359
            [&m, &fit_ofn](const arma::vec& vals_inp, arma::vec* grad_out, arma::mat* hess_out) -> double {
×
1360
              return fit_ofn(vals_inp, grad_out, hess_out, &m);
×
1361
            },
1362
            gamma_tmp,
1363
            gamma_lower,
1364
            gamma_upper);
1365

1366
        if (Optim::log_level > 0) {
×
1367
          arma::cout << "  best objective: " << min_ofn_tmp << arma::endl;
×
1368
          if (Optim::reparametrize) {
×
1369
            arma::cout << "  best solution: " << Optim::reparam_from(gamma_tmp).t() << " ";
×
1370
          } else {
1371
            arma::cout << "  best solution: " << gamma_tmp.t() << " ";
×
1372
          }
1373
        }
1374

1375
        if (min_ofn_tmp < min_ofn) {
×
1376
          m_theta = gamma_tmp;
×
1377
          if (Optim::reparametrize)
×
1378
            m_theta = Optim::reparam_from(gamma_tmp);
×
1379
          m_est_theta = true;
×
1380
          min_ofn = min_ofn_tmp;
×
1381

1382
          m_is_empty = false;
×
1383
          m_T = std::move(m.L);
×
1384
          m_R = std::move(m.R);
×
1385
          m_M = std::move(m.Fstar);
×
1386
          m_circ = std::move(m.Rstar);
×
1387
          m_star = std::move(m.Qstar);
×
1388
          m_z = std::move(m.Estar);
×
1389
          if (m_est_beta) {
×
1390
            m_beta = std::move(m.betahat);
×
1391
            m_z = std::move(m.Estar);
×
1392
          } else {
1393
            // m_beta = parameters.beta.value(); already done above
1394
            m_z = std::move(m.ystar) - m_M * m_beta;
×
1395
          }
1396
          if (m_est_sigma2) {
×
1397
            m_sigma2 = m.SSEstar / n;
×
1398
            if (m_objective.compare("LMP") == 0) {
×
1399
              m_sigma2 = m.SSEstar / (n - m_F.n_cols);
×
1400
            }
1401
          }
1402
        }
1403
      }
×
1404
    } else
×
1405
      throw std::runtime_error("Unsupported optim: " + optim + " (supported are: none, BFGS[#], Newton[#])");
×
1406
  }
89✔
1407

1408
  // arma::cout << "theta:" << m_theta << arma::endl;
1409
}
89✔
1410

1411
/** Compute the prediction for given points X'
1412
 * @param X_n is n_n*d matrix of points where to predict output
1413
 * @param return_stdev is true if return also stdev column vector
1414
 * @param return_cov is true if return also cov matrix between X_n
1415
 * @param return_deriv is true if return also derivative of prediction wrt x
1416
 * @return output prediction: n_n means, [n_n standard deviations], [n_n*n_n full covariance matrix]
1417
 */
1418
LIBKRIGING_EXPORT std::tuple<arma::vec, arma::vec, arma::mat, arma::mat, arma::mat>
1419
Kriging::predict(const arma::mat& X_n, bool return_stdev, bool return_cov, bool return_deriv) {
14✔
1420
  arma::uword n_n = X_n.n_rows;
14✔
1421
  arma::uword n_o = m_X.n_rows;
14✔
1422
  arma::uword d = m_X.n_cols;
14✔
1423
  if (X_n.n_cols != d)
14✔
1424
    throw std::runtime_error("Predict locations have wrong dimension: " + std::to_string(X_n.n_cols) + " instead of "
×
1425
                             + std::to_string(d));
×
1426

1427
  arma::vec yhat_n = arma::vec(n_n, arma::fill::none);
14✔
1428
  arma::vec ysd2_n = arma::vec(n_n, arma::fill::zeros);
14✔
1429
  arma::mat Sigma_n = arma::mat(n_n, n_n, arma::fill::zeros);
14✔
1430
  arma::mat Dyhat_n = arma::mat(n_n, d, arma::fill::zeros);
14✔
1431
  arma::mat Dysd2_n = arma::mat(n_n, d, arma::fill::zeros);
14✔
1432

1433
  arma::mat Xn_o = trans(m_X);  // already normalized if needed
28✔
1434
  arma::mat Xn_n = X_n;
14✔
1435
  // Normalize X_n
1436
  Xn_n.each_row() -= m_centerX;
28✔
1437
  Xn_n.each_row() /= m_scaleX;
28✔
1438

1439
  double sigma2 = m_sigma2 * (m_objective.compare("LMP") == 0 ? (n_o - m_F.n_cols) / (n_o - m_F.n_cols - 2) : 1.0);
14✔
1440

1441
  arma::mat F_n = Trend::regressionModelMatrix(m_regmodel, Xn_n);
14✔
1442
  Xn_n = trans(Xn_n);
14✔
1443

1444
  auto t0 = Bench::tic();
14✔
1445
  arma::mat R_on = arma::mat(n_o, n_n, arma::fill::none);
14✔
1446
  for (arma::uword i = 0; i < n_o; i++) {
605✔
1447
    for (arma::uword j = 0; j < n_n; j++) {
50,065✔
1448
      arma::vec dij = Xn_o.col(i) - Xn_n.col(j);
98,948✔
1449
      if (dij.is_zero(arma::datum::eps))
49,474✔
1450
        R_on.at(i, j) = 1.0;
567✔
1451
      else
1452
        R_on.at(i, j) = _Cov(dij, m_theta);
97,814✔
1453
    }
49,474✔
1454
  }
1455
  t0 = Bench::toc(nullptr, "R_on       ", t0);
14✔
1456

1457
  arma::mat Rstar_on = LinearAlgebra::solve(m_T, R_on);
14✔
1458
  t0 = Bench::toc(nullptr, "Rstar_on   ", t0);
14✔
1459

1460
  yhat_n = F_n * m_beta + trans(Rstar_on) * m_z;
56✔
1461
  t0 = Bench::toc(nullptr, "yhat_n     ", t0);
14✔
1462

1463
  // Un-normalize predictor
1464
  yhat_n = m_centerY + m_scaleY * yhat_n;
42✔
1465

1466
  arma::mat Fhat_n = trans(Rstar_on) * m_M;
42✔
1467
  arma::mat E_n = F_n - Fhat_n;
14✔
1468
  arma::mat Ecirc_n = LinearAlgebra::rsolve(m_circ, E_n);
14✔
1469
  t0 = Bench::toc(nullptr, "Ecirc_n    ", t0);
14✔
1470

1471
  if (return_stdev) {
14✔
1472
    ysd2_n = 1.0 - sum(Rstar_on % Rstar_on, 0).as_col() + sum(Ecirc_n % Ecirc_n, 1).as_col();
56✔
1473
    ysd2_n.transform([](double val) { return (std::isnan(val) || val < 0 ? 0.0 : val); });
1,169✔
1474
    ysd2_n *= sigma2 * m_scaleY * m_scaleY;
14✔
1475
    t0 = Bench::toc(nullptr, "ysd2_n     ", t0);
14✔
1476
  }
1477

1478
  if (return_cov) {
14✔
1479
    // Compute the covariance matrix between new data points
1480
    arma::mat R_nn = arma::mat(n_n, n_n, arma::fill::none);
10✔
1481
    for (arma::uword i = 0; i < n_n; i++) {
768✔
1482
      // R_nn.at(i, i) = 1;
1483
      for (arma::uword j = 0; j < i; j++) {
33,380✔
1484
        R_nn.at(i, j) = R_nn.at(j, i) = _Cov((Xn_n.col(i) - Xn_n.col(j)), m_theta);
130,488✔
1485
      }
1486
    }
1487
    R_nn.diag().ones();
10✔
1488
    t0 = Bench::toc(nullptr, "R_nn       ", t0);
10✔
1489

1490
    Sigma_n = R_nn - trans(Rstar_on) * Rstar_on + Ecirc_n * trans(Ecirc_n);
30✔
1491
    Sigma_n *= sigma2 * m_scaleY * m_scaleY;
10✔
1492
    t0 = Bench::toc(nullptr, "Sigma_n    ", t0);
10✔
1493
  }
10✔
1494

1495
  if (return_deriv) {
14✔
1496
    //// https://github.com/libKriging/dolka/blob/bb1dbf0656117756165bdcff0bf5e0a1f963fbef/R/kmStuff.R#L322C1-L363C10
1497
    // for (i in 1:n_n) {
1498
    //
1499
    //   ## =================================================================
1500
    //   ## 'DF_n_i' and 'DR_on_i' are matrices with
1501
    //   ## dimension c(n_n, d)
1502
    //   ## =================================================================
1503
    //
1504
    //   DF_n_i <- trend.deltax(x = XNew[i, ], model = object)
1505
    //   KOldNewi <- as.vector(KOldNew[ , i])
1506
    //   DR_on_i <- covVector.dx(x = as.vector(XNew[i, ]),
1507
    //                               X = X,
1508
    //                               object = object@covariance,
1509
    //                               c = KOldNewi)
1510
    //
1511
    //   KOldNewStarDer[ , i, i, ] <-
1512
    //       backsolve(L, DR_on_i, upper.tri = FALSE)
1513
    //
1514
    //   ## Gradient of the kriging trend and mean
1515
    //   muNewHatDer[i, i, ] <- crossprod(DF_n_i, betaHat)
1516
    //   ## dim in product c(d, n) and NULL(length d)
1517
    //   mean_nHatDer[i, i, ] <- muNewHatDer[i, i, ] +
1518
    //       crossprod(KOldNewStarDer[ , i, i,  ], zStar)
1519
    //   ## dim in product c(d, n) and NULL(length n)
1520
    //   s2Der[i, i,  ] <-
1521
    //       - 2 * crossprod(KOldNewStarDer[ , i, i, ],
1522
    //                       drop(KOldNewStar[ , i]))
1523
    //
1524
    //   ## dim in product c(d, n) and c(n, p)
1525
    //
1526
    //   if (type == "UK") {
1527
    //       ENewDagDer[i, i, , ] <-
1528
    //           backsolve(t(RStar),
1529
    //                     DF_n_i -
1530
    //                     t(crossprod(KOldNewStarDer[ , i, i, ], FStar)),
1531
    //                     upper.tri = FALSE)
1532
    //       ## dim in product NULL (length p) and c(p, d) because of 'drop'
1533
    //       s2Der[i, i, ] <- s2Der[i, i, ] + 2 * drop(ENewDagT[ , i]) %*%
1534
    //           drop(ENewDagDer[i, i, , ])
1535
    //   }
1536
    //  numerical derivative step : value is sensitive only for non linear trends. Otherwise, it gives exact results.
1537
    const double h = 1.0E-5;
10✔
1538
    arma::mat h_eye_d = h * arma::mat(d, d, arma::fill::eye);
30✔
1539

1540
    // Compute the derivatives of the covariance and trend functions
1541
    for (arma::uword i = 0; i < n_n; i++) {  // for each predict point... should be parallel ?
768✔
1542
      arma::mat DR_on_i = arma::mat(n_o, d, arma::fill::none);
758✔
1543
      for (arma::uword j = 0; j < n_o; j++) {
48,148✔
1544
        DR_on_i.row(j) = R_on.at(j, i) * trans(_DlnCovDx(Xn_n.col(i) - Xn_o.col(j), m_theta));
189,560✔
1545
      }
1546
      t0 = Bench::toc(nullptr, "DR_on_i    ", t0);
758✔
1547

1548
      arma::mat tXn_n_repd_i
1549
          = arma::trans(Xn_n.col(i) * arma::mat(1, d, arma::fill::ones));  // just duplicate X_n.row(i) d times
2,274✔
1550
      arma::mat DF_n_i = (Trend::regressionModelMatrix(m_regmodel, tXn_n_repd_i + h_eye_d)
1,516✔
1551
                          - Trend::regressionModelMatrix(m_regmodel, tXn_n_repd_i - h_eye_d))
2,274✔
1552
                         / (2 * h);
1,516✔
1553
      t0 = Bench::toc(nullptr, "DF_n_i     ", t0);
758✔
1554

1555
      arma::mat W_i = LinearAlgebra::solve(m_T, DR_on_i);
758✔
1556
      t0 = Bench::toc(nullptr, "W_i        ", t0);
758✔
1557
      Dyhat_n.row(i) = trans(DF_n_i * m_beta + trans(W_i) * m_z);
3,032✔
1558
      t0 = Bench::toc(nullptr, "Dyhat_n    ", t0);
758✔
1559

1560
      if (return_stdev) {
758✔
1561
        arma::mat DEcirc_n_i = LinearAlgebra::solve(m_circ.t(), trans(DF_n_i - W_i.t() * m_M));
3,790✔
1562
        Dysd2_n.row(i) = -2 * Rstar_on.col(i).t() * W_i + 2 * Ecirc_n.row(i) * DEcirc_n_i;
1,516✔
1563
        t0 = Bench::toc(nullptr, "Dysd2_n    ", t0);
758✔
1564
      }
758✔
1565
    }
758✔
1566
    Dyhat_n *= m_scaleY;
10✔
1567
    Dysd2_n *= sigma2 * m_scaleY * m_scaleY;
10✔
1568
  }
10✔
1569

1570
  return std::make_tuple(std::move(yhat_n),
28✔
1571
                         std::move(arma::sqrt(ysd2_n)),
28✔
1572
                         std::move(Sigma_n),
14✔
1573
                         std::move(Dyhat_n),
14✔
1574
                         std::move(Dysd2_n / (2 * arma::sqrt(ysd2_n) * arma::mat(1, d, arma::fill::ones))));
70✔
1575
  /*if (return_stdev)
1576
    if (return_cov)
1577
      return std::make_tuple(std::move(yhat_n), std::move(pred_stdev), std::move(pred_cov));
1578
    else
1579
      return std::make_tuple(std::move(yhat_n), std::move(pred_stdev), nullptr);
1580
  else if (return_cov)
1581
    return std::make_tuple(std::move(yhat_n), std::move(pred_cov), nullptr);
1582
  else
1583
    return std::make_tuple(std::move(yhat_n), nullptr, nullptr);*/
1584
}
14✔
1585

1586
/** Draw sample trajectories of kriging at given points X'
 * @param nsim is number of simulations to draw
 * @param seed is seed for random number generator
 * @param X_n is n_n*d matrix of points where to simulate output
 * @param will_update is true if we want to keep simulations data for future update
 * @return output is n_n*nsim matrix of simulations at X_n
 * @throws std::runtime_error if X_n does not have the training input dimension d
 */
LIBKRIGING_EXPORT arma::mat Kriging::simulate(const int nsim,
                                              const int seed,
                                              const arma::mat& X_n,
                                              const bool will_update) {
  arma::uword n_n = X_n.n_rows;  // number of simulation points
  arma::uword n_o = m_X.n_rows;  // number of training points
  arma::uword d = m_X.n_cols;    // input dimension
  if (X_n.n_cols != d)
    throw std::runtime_error("Simulate locations have wrong dimension: " + std::to_string(X_n.n_cols) + " instead of "
                             + std::to_string(d));

  arma::mat Xn_o = trans(m_X);  // already normalized if needed
  arma::mat Xn_n = X_n;
  // Normalize X_n with the same center/scale used at fit time
  Xn_n.each_row() -= m_centerX;
  Xn_n.each_row() /= m_scaleX;

  // Define regression matrix
  arma::mat F_n = Trend::regressionModelMatrix(m_regmodel, Xn_n);
  // if (will_update) sim_Fp = F_n;
  Xn_n = trans(Xn_n);  // column-wise access below (one column per point)

  auto t0 = Bench::tic();
  // Compute covariance between new data (symmetric, unit diagonal)
  arma::mat R_nn = arma::mat(n_n, n_n, arma::fill::none);
  for (arma::uword i = 0; i < n_n; i++) {
    // R_nn.at(i, i) = 1.0;
    for (arma::uword j = 0; j < i; j++) {
      R_nn.at(i, j) = R_nn.at(j, i) = _Cov((Xn_n.col(i) - Xn_n.col(j)), m_theta);
    }
  }
  R_nn.diag().ones();
  t0 = Bench::toc(nullptr, "R_nn          ", t0);

  // Compute covariance between training data and new data to predict
  arma::mat R_on = arma::mat(n_o, n_n, arma::fill::none);
  for (arma::uword i = 0; i < n_o; i++) {
    for (arma::uword j = 0; j < n_n; j++) {
      R_on.at(i, j) = _Cov((Xn_o.col(i) - Xn_n.col(j)), m_theta);
    }
  }
  t0 = Bench::toc(nullptr, "R_on        ", t0);

  // Whitened cross-correlation (m_T is the Cholesky factor of the training correlation)
  arma::mat Rstar_on = LinearAlgebra::solve(m_T, R_on);
  t0 = Bench::toc(nullptr, "Rstar_on   ", t0);
  // arma::cout << "Rstar_on:" << Rstar_on << arma::endl;

  // Conditional (kriging) mean at X_n
  arma::vec yhat_n = F_n * m_beta + trans(Rstar_on) * m_z;
  t0 = Bench::toc(nullptr, "yhat_n        ", t0);

  // Trend estimation error term (enters the conditional covariance)
  arma::mat Fhat_n = trans(Rstar_on) * m_M;
  arma::mat E_n = F_n - Fhat_n;
  arma::mat Ecirc_n = LinearAlgebra::rsolve(m_circ, E_n);
  t0 = Bench::toc(nullptr, "Ecirc_n       ", t0);
  // arma::cout << "eig(R_nn):" << arma::eig_sym(R_nn) << arma::endl;

  // arma::cout << "t(Rstar_on)*Rstar_on:" <<  trans(Rstar_on) * Rstar_on << arma::endl;

  // Conditional covariance without trend uncertainty...
  arma::mat SigmaNoTrend_nKo = R_nn - trans(Rstar_on) * Rstar_on;
  // arma::cout << "eig(SigmaNoTrend_nKo):" << arma::eig_sym(SigmaNoTrend_nKo) << arma::endl;

  // ...then with the trend estimation term added
  arma::mat Sigma_nKo = SigmaNoTrend_nKo + Ecirc_n * trans(Ecirc_n);
  t0 = Bench::toc(nullptr, "Sigma_nKo     ", t0);

  // arma::cout << "eig(Sigma_nKo):" << arma::eig_sym(Sigma_nKo) << arma::endl;

  // Cholesky of the conditional covariance (regularized against non-PD rounding)
  arma::mat LSigma_nKo = LinearAlgebra::safe_chol_lower(Sigma_nKo);
  t0 = Bench::toc(nullptr, "LSigma_nKo     ", t0);

  // Draw: mean + L * N(0,I) * sigma
  arma::mat y_n = arma::mat(n_n, nsim, arma::fill::none);
  y_n.each_col() = yhat_n;
  Random::reset_seed(seed);
  y_n += LSigma_nKo * Random::randn_mat(n_n, nsim) * std::sqrt(m_sigma2);

  // Un-normalize simulations
  y_n = m_centerY + m_scaleY * y_n;

  if (will_update) {
    // Cache everything update_simulate() needs to condition these paths later
    lastsimup_Xn_u.clear();  // force reset to force update_simulate consider new data
    lastsim_y_n = y_n;

    lastsim_Xn_n = Xn_n;  // stored transposed (d x n_n)
    lastsim_seed = seed;
    lastsim_nsim = nsim;

    lastsim_R_nn = R_nn;
    lastsim_F_n = F_n;

    lastsim_L_oCn = Rstar_on;
    lastsim_L_nCn = LinearAlgebra::safe_chol_lower(SigmaNoTrend_nKo);
    t0 = Bench::toc(nullptr, "L_nCn     ", t0);

    // Block-Cholesky factor of the joint (old,new) correlation matrix
    lastsim_L_on = arma::join_rows(arma::join_cols(m_T, lastsim_L_oCn.t()),
                                   arma::join_cols(arma::zeros(n_o, n_n), lastsim_L_nCn));

    // Joint correlation inverse via the triangular factor inverse
    arma::mat Linv_on = LinearAlgebra::solve(lastsim_L_on, arma::mat(n_o + n_n, n_o + n_n, arma::fill::eye));
    t0 = Bench::toc(nullptr, "Linv_on     ", t0);
    lastsim_Rinv_on = Linv_on.t() * Linv_on;
    t0 = Bench::toc(nullptr, "Rinv_on     ", t0);

    // Joint trend matrix and its whitened/QR-derived variants
    lastsim_F_on = arma::join_cols(m_F, lastsim_F_n);
    lastsim_Fstar_on = LinearAlgebra::solve(lastsim_L_on, lastsim_F_on);
    t0 = Bench::toc(nullptr, "Fstar_on     ", t0);
    arma::mat Q_Fstar_on;
    arma::qr(Q_Fstar_on, lastsim_circ_on, lastsim_Fstar_on);
    lastsim_Fcirc_on = LinearAlgebra::rsolve(lastsim_circ_on, lastsim_F_on);
    t0 = Bench::toc(nullptr, "Fcirc_on     ", t0);

    // Trend error term of the new points given the old ones
    lastsim_Fhat_nKo = lastsim_L_oCn.t() * m_M;
    t0 = Bench::toc(nullptr, "Fhat_nKo     ", t0);
    lastsim_Ecirc_nKo = LinearAlgebra::rsolve(m_circ, F_n - lastsim_Fhat_nKo);
    t0 = Bench::toc(nullptr, "Ecirc_nKo     ", t0);
  }

  // Un-normalize simulations
  return y_n;
}
1710

1711
/** Alias of simulate(): draw conditional sample trajectories at X_n.
 * @param nsim number of trajectories to draw
 * @param seed random number generator seed
 * @param X_n n_n*d matrix of points where to simulate output
 * @param will_update true to keep simulation data for a later update
 * @return n_n*nsim matrix of simulations at X_n
 */
LIBKRIGING_EXPORT arma::mat Kriging::rand(const int nsim,
                                          const int seed,
                                          const arma::mat& X_n,
                                          const bool will_update) {
  // Thin delegation wrapper: simulate() holds the actual implementation.
  arma::mat trajectories = simulate(nsim, seed, X_n, will_update);
  return trajectories;
}
1717

1718
/** Update the sample trajectories previously drawn by simulate(..., will_update=true)
 * by conditioning them on new observations (y_u, X_u), without re-simulating.
 * Intermediate quantities are cached (lastsimup_*) so repeated calls with the
 * same X_u only recompute the final correction.
 * @param y_u is n_u length column vector of new output values
 * @param X_u is n_u*d matrix of new input points
 * @return n_n*nsim matrix of updated simulations at the points of the last simulate() call
 * @throws std::runtime_error on dimension mismatch or if no previous simulation is stored
 */
LIBKRIGING_EXPORT arma::mat Kriging::update_simulate(const arma::vec& y_u, const arma::mat& X_u) {
  if (y_u.n_elem != X_u.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X_u.n_rows) + "x"
                             + std::to_string(X_u.n_cols) + "), y: (" + std::to_string(y_u.n_elem) + ")");

  if (X_u.n_cols != m_X.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(m_X.n_cols)
                             + "), new X: (...x" + std::to_string(X_u.n_cols) + ")");

  if (lastsim_y_n.is_empty() || lastsim_y_n.n_rows == 0)
    throw std::runtime_error("No previous simulation data available");

  // lastsim_Xn_n is stored transposed (d x n_n), so n_rows is the input dimension d
  if (lastsim_Xn_n.n_rows != X_u.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(X_u.n_cols)
                             + "), last sim X: (...x" + std::to_string(lastsim_Xn_n.n_rows) + ")");

  arma::uword n_n = lastsim_Xn_n.n_cols;  // number of previously simulated points
  arma::uword n_o = m_X.n_rows;           // number of training points
  arma::uword d = m_X.n_cols;             // input dimension; NOTE(review): unused below
  arma::mat Xn_o = trans(m_X);    // already normalized if needed
  arma::mat Xn_n = lastsim_Xn_n;  // already normalized

  arma::uword n_on = n_o + n_n;  // NOTE(review): unused below (kept from earlier revision?)
  // arma::mat Xn_on = arma::join_cols(Xn_o, Xn_n);
  arma::mat F_on = arma::join_cols(m_F, lastsim_F_n);  // NOTE(review): unused below; lastsim_F_on is used instead

  arma::uword n_u = X_u.n_rows;  // number of update points
  // Normalize X_u
  arma::mat Xn_u = X_u;
  Xn_u.each_row() -= m_centerX;
  Xn_u.each_row() /= m_scaleX;

  // Define regression matrix
  arma::mat F_u = Trend::regressionModelMatrix(m_regmodel, Xn_u);

  auto t0 = Bench::tic();
  Xn_u = trans(Xn_u);  // column-wise access below
  t0 = Bench::toc(nullptr, "Xn_u.t()      ", t0);

  // Reuse the cached update quantities if the update points are unchanged
  bool use_lastsimup
      = (!lastsimup_Xn_u.is_empty()) && arma::approx_equal(lastsimup_Xn_u, Xn_u, "absdiff", arma::datum::eps);
  if (!use_lastsimup) {
    lastsimup_Xn_u = Xn_u;

    // Compute covariance between updated data
    lastsimup_R_uu = arma::mat(n_u, n_u, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      // lastsimup_R_uu.at(i, i) = 1.0;
      for (arma::uword j = 0; j < i; j++) {
        lastsimup_R_uu.at(i, j) = lastsimup_R_uu.at(j, i) = _Cov((Xn_u.col(i) - Xn_u.col(j)), m_theta);
      }
    }
    lastsimup_R_uu.diag().ones();
    t0 = Bench::toc(nullptr, "R_uu          ", t0);

    // Compute covariance between updated/old data
    lastsimup_R_uo = arma::mat(n_u, n_o, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      for (arma::uword j = 0; j < n_o; j++) {
        lastsimup_R_uo.at(i, j) = _Cov((Xn_u.col(i) - Xn_o.col(j)), m_theta);
      }
    }
    t0 = Bench::toc(nullptr, "R_uo          ", t0);

    // Compute covariance between updated/new data
    lastsimup_R_un = arma::mat(n_u, n_n, arma::fill::none);
    for (arma::uword i = 0; i < n_u; i++) {
      for (arma::uword j = 0; j < n_n; j++) {
        lastsimup_R_un.at(i, j) = _Cov((Xn_u.col(i) - Xn_n.col(j)), m_theta);
      }
    }
    t0 = Bench::toc(nullptr, "R_un          ", t0);
  }

  // ======================================================================
  // FOXY step #1 Extend the simulation to the design 'X_u' IF
  // NECESSARY. Remind that the simulation number j is
  // conditional on the given 'y_o' and on 'y_n = Y_sim[ , j]'
  //
  // CAUTION. To avoid unnecessary re-computations we here use
  // auxiliary variables that where computed in the creation of
  // the KM0 step AND later in the simulation. The first ones are
  // in 'theKM0$Extra' and the second ones are in 'Ex'
  //
  // In indices 'C' means comma and 'K' means pipe '|'
  //
  // ======================================================================

  if (!use_lastsimup) {
    // Joint covariance of the update points with (old, new) points, whitened
    arma::mat R_onCu = arma::join_rows(lastsimup_R_uo, lastsimup_R_un).t();
    arma::mat Rstar_onCu = LinearAlgebra::solve(lastsim_L_on, R_onCu);
    t0 = Bench::toc(nullptr, "Rstar_onCu          ", t0);

    // Trend error of the update points given (old, new)
    arma::mat Ecirc_uKon = LinearAlgebra::rsolve(lastsim_circ_on, F_u - Rstar_onCu.t() * lastsim_Fstar_on);
    t0 = Bench::toc(nullptr, "Ecirc_uKon          ", t0);

    // Conditional covariance of y_u given (y_o, y_n)
    arma::mat Sigma_uKon = lastsimup_R_uu - Rstar_onCu.t() * Rstar_onCu + Ecirc_uKon * Ecirc_uKon.t();
    t0 = Bench::toc(nullptr, "Sigma_uKon          ", t0);

    arma::mat LSigma_uKon = LinearAlgebra::safe_chol_lower(Sigma_uKon);
    t0 = Bench::toc(nullptr, "LSigma_uKon          ", t0);

    // Kriging weights of the update points wrt (old, new) observations
    arma::mat W_uCon = (R_onCu.t() + Ecirc_uKon * lastsim_Fcirc_on.t()) * lastsim_Rinv_on;
    t0 = Bench::toc(nullptr, "W_uCon          ", t0);

    // Conditional mean per simulated path: part from y_o plus part from each y_n column
    arma::mat m_u = W_uCon.head_cols(n_o) * m_y;
    arma::mat M_u = arma::repmat(m_u, 1, lastsim_nsim) + W_uCon.tail_cols(n_n) * lastsim_y_n;

    // Re-seed with the simulation seed so the extension is reproducible
    Random::reset_seed(lastsim_seed);
    lastsimup_y_u = M_u + LSigma_uKon * Random::randn_mat(n_u, lastsim_nsim) * std::sqrt(m_sigma2);
    t0 = Bench::toc(nullptr, "y_u          ", t0);
  }

  // ======================================================================
  // FOXY step #2   Update the simulated paths on 'X_n'
  // ======================================================================

  if (!use_lastsimup) {
    arma::mat Rstar_ou = LinearAlgebra::solve(m_T, lastsimup_R_uo.t());
    t0 = Bench::toc(nullptr, "Rstar_ou          ", t0);

    // Trend error of the update points given the old points only
    arma::mat Fhat_uKo = Rstar_ou.t() * m_M;
    arma::mat Ecirc_uKo = LinearAlgebra::rsolve(m_circ, F_u - Fhat_uKo);
    t0 = Bench::toc(nullptr, "Ecirc_uKo          ", t0);

    // Conditional (on y_o) covariance of y_u with itself and with y_n
    arma::mat Rtild_uCu = lastsimup_R_uu - Rstar_ou.t() * Rstar_ou + Ecirc_uKo * Ecirc_uKo.t();
    t0 = Bench::toc(nullptr, "Rtild_uCu          ", t0);

    arma::mat Rtild_nCu = lastsimup_R_un - Rstar_ou.t() * lastsim_L_oCn + Ecirc_uKo * lastsim_Ecirc_nKo.t();
    t0 = Bench::toc(nullptr, "Rtild_nCu          ", t0);

    // Update gain applied to the residual (y_u - simulated y_u)
    lastsimup_Wtild_nKu = LinearAlgebra::solve(Rtild_uCu, Rtild_nCu).t();
    t0 = Bench::toc(nullptr, "Wtild_nKu          ", t0);
  }

  // Condition each stored path on the observed y_u
  return lastsim_y_n + lastsimup_Wtild_nKu * (arma::repmat(y_u, 1, lastsim_nsim) - lastsimup_y_u);
}
×
1855

1856
/** Alias of update_simulate(): condition previously drawn trajectories on new data.
 * @param y_u n_u length column vector of new output values
 * @param X_u n_u*d matrix of new input points
 * @return n_n*nsim matrix of updated simulations
 */
LIBKRIGING_EXPORT arma::mat Kriging::update_rand(const arma::vec& y_u, const arma::mat& X_u) {
  // Thin delegation wrapper kept for API symmetry with rand()/simulate().
  arma::mat updated = update_simulate(y_u, X_u);
  return updated;
}
1859

1860
/** Add new conditional data points to previous (X,y), then perform new fit.
 * Previous data are de-normalized back to the original scale before being
 * joined with the (un-normalized) new data, so fit() can re-normalize.
 * @param y_u is n_u length column vector of new output
 * @param X_u is n_u*d matrix of new input
 * @param refit is true if we want to re-fit the model (re-optimize theta/sigma2),
 *        false to only recompute the model matrices with the current parameters
 * @throws std::runtime_error on dimension mismatch between y_u, X_u and the model
 */
LIBKRIGING_EXPORT void Kriging::update(const arma::vec& y_u, const arma::mat& X_u, const bool refit) {
  if (y_u.n_elem != X_u.n_rows)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (" + std::to_string(X_u.n_rows) + "x"
                             + std::to_string(X_u.n_cols) + "), y: (" + std::to_string(y_u.n_elem) + ")");

  if (X_u.n_cols != m_X.n_cols)
    throw std::runtime_error("Dimension of new data should be the same:\n X: (...x" + std::to_string(m_X.n_cols)
                             + "), new X: (...x" + std::to_string(X_u.n_cols) + ")");

  // rebuild starting parameters
  // Current parameters are rescaled back to the original (un-normalized) units,
  // since fit() receives un-normalized data below.
  Parameters parameters;
  if (refit) {  // re-fit
    if (m_est_beta)
      // beta was estimated: pass an empty vector and let fit() re-estimate it
      parameters = Parameters{std::make_optional(this->m_sigma2 * this->m_scaleY * this->m_scaleY),
                              this->m_est_sigma2,
                              std::make_optional(trans(this->m_theta) % this->m_scaleX),
                              this->m_est_theta,
                              std::make_optional(arma::ones<arma::vec>(0)),
                              true};
    else
      // beta was fixed: keep it fixed (rescaled to original units)
      parameters = Parameters{std::make_optional(this->m_sigma2 * this->m_scaleY * this->m_scaleY),
                              this->m_est_sigma2,
                              std::make_optional(trans(this->m_theta) % this->m_scaleX),
                              this->m_est_theta,
                              std::make_optional(trans(this->m_beta) * this->m_scaleY),
                              false};
    this->fit(arma::join_cols(m_y * this->m_scaleY + this->m_centerY,
                              y_u),  // de-normalize previous data according to suite unnormed new data
              arma::join_cols((m_X.each_row() % this->m_scaleX).each_row() + this->m_centerX, X_u),
              m_regmodel,
              m_normalize,
              m_optim,
              m_objective,
              parameters);
  } else {  // just update
    // Freeze all parameters at their current values and rebuild with optim "none"
    parameters = Parameters{std::make_optional(this->m_sigma2 * this->m_scaleY * this->m_scaleY),
                            false,
                            std::make_optional(trans(arma::mat(this->m_theta)) % this->m_scaleX),
                            false,
                            std::make_optional(arma::vec(this->m_beta) * this->m_scaleY),
                            false};
    this->fit(arma::join_cols(m_y * this->m_scaleY + this->m_centerY,
                              y_u),  // de-normalize previous data according to suite unnormed new data
              arma::join_cols((m_X.each_row() % this->m_scaleX).each_row() + this->m_centerX, X_u),
              m_regmodel,
              m_normalize,
              "none",
              m_objective,
              parameters);
  }
}
1916

1917
/** Build a human-readable, multi-line description of the model: data ranges,
 * trend, variance and covariance settings, flagging which quantities were
 * estimated ("est.") versus user-fixed.
 * @return summary text
 */
LIBKRIGING_EXPORT std::string Kriging::summary() const {
  std::ostringstream oss;
  // Prints a vector as comma-separated values into oss.
  // The inner lambda is created anew on each call, so the mutable counter i
  // restarts at 0 for every vector printed.
  auto vec_printer = [&oss](const arma::vec& v) {
    v.for_each([&oss, i = 0](const arma::vec::elem_type& val) mutable {
      if (i++ > 0)
        oss << ", ";
      oss << val;
    });
  };

  if (m_X.is_empty() || m_X.n_rows == 0) {  // not yet fitted
    oss << "* covariance:\n";
    oss << "  * kernel: " << m_covType << "\n";
  } else {
    // Data section: sample count and per-dimension input ranges -> output range
    oss << "* data";
    oss << ((m_normalize) ? " (normalized): " : ": ") << m_X.n_rows << "x";
    arma::rowvec Xmins = arma::min(m_X, 0);
    arma::rowvec Xmaxs = arma::max(m_X, 0);
    for (arma::uword i = 0; i < m_X.n_cols; i++) {
      oss << "[" << Xmins[i] << "," << Xmaxs[i] << "]";
      if (i < m_X.n_cols - 1)
        oss << ",";
    }
    oss << " -> " << m_y.n_elem << "x[" << arma::min(m_y) << "," << arma::max(m_y) << "]\n";
    // Trend / variance / covariance sections
    oss << "* trend " << Trend::toString(m_regmodel);
    oss << ((m_est_beta) ? " (est.): " : ": ");
    vec_printer(m_beta);
    oss << "\n";
    oss << "* variance";
    oss << ((m_est_sigma2) ? " (est.): " : ": ");
    oss << m_sigma2;
    oss << "\n";
    oss << "* covariance:\n";
    oss << "  * kernel: " << m_covType << "\n";
    oss << "  * range";
    oss << ((m_est_theta) ? " (est.): " : ": ");
    vec_printer(m_theta);
    oss << "\n";
    oss << "  * fit:\n";
    oss << "    * objective: " << m_objective << "\n";
    oss << "    * optim: " << m_optim << "\n";
  }
  return oss.str();
}
1961

1962
/** Serialize the fitted model to a JSON file (format version 2).
 * Field names form the on-disk contract read back by Kriging::load();
 * do not rename them without bumping the version.
 * @param filename path of the JSON file to write
 */
void Kriging::save(const std::string filename) const {
  nlohmann::json j;

  j["version"] = 2;
  j["content"] = "Kriging";

  // _Cov_pow  &std::function embedded by make_Cov
  // (the kernel functions are not serialized; covType is enough to rebuild them)
  j["covType"] = m_covType;
  j["X"] = to_json(m_X);
  j["centerX"] = to_json(m_centerX);
  j["scaleX"] = to_json(m_scaleX);
  j["y"] = to_json(m_y);
  j["centerY"] = m_centerY;
  j["scaleY"] = m_scaleY;
  j["normalize"] = m_normalize;
  j["regmodel"] = Trend::toString(m_regmodel);
  j["optim"] = m_optim;
  j["objective"] = m_objective;
  // Auxiliary data (precomputed fit matrices, saved to avoid refitting on load)
  j["dX"] = to_json(m_dX);
  j["maxdX"] = to_json(m_maxdX);
  j["F"] = to_json(m_F);
  j["T"] = to_json(m_T);
  j["R"] = to_json(m_R);
  j["M"] = to_json(m_M);
  j["star"] = to_json(m_star);
  j["circ"] = to_json(m_circ);
  j["z"] = to_json(m_z);
  j["beta"] = to_json(m_beta);
  j["est_beta"] = m_est_beta;
  j["theta"] = to_json(m_theta);
  j["est_theta"] = m_est_theta;
  j["sigma2"] = m_sigma2;
  j["est_sigma2"] = m_est_sigma2;

  // Pretty-print with 4-space indentation
  std::ofstream f(filename);
  f << std::setw(4) << j;
}
2000

2001
/** Deserialize a model previously written by Kriging::save().
 * Validates the format version (must be 2) and content type, then restores
 * all fitted state so no refit is needed.
 * @param filename path of the JSON file to read
 * @return the reconstructed Kriging model
 * @throws std::runtime_error on version or content mismatch
 */
Kriging Kriging::load(const std::string filename) {
  std::ifstream f(filename);
  nlohmann::json j = nlohmann::json::parse(f);

  uint32_t version = j["version"].template get<uint32_t>();
  if (version != 2) {
    throw std::runtime_error(asString("Bad version to load from '", filename, "'; found ", version, ", requires 2"));
  }
  std::string content = j["content"].template get<std::string>();
  if (content != "Kriging") {
    throw std::runtime_error(
        asString("Bad content to load from '", filename, "'; found '", content, "', requires 'Kriging'"));
  }

  std::string covType = j["covType"].template get<std::string>();
  Kriging kr(covType);  // _Cov_pow  &std::function embedded by make_Cov

  kr.m_X = mat_from_json(j["X"]);
  kr.m_centerX = rowvec_from_json(j["centerX"]);
  kr.m_scaleX = rowvec_from_json(j["scaleX"]);
  kr.m_y = colvec_from_json(j["y"]);
  kr.m_centerY = j["centerY"].template get<decltype(kr.m_centerY)>();
  kr.m_scaleY = j["scaleY"].template get<decltype(kr.m_scaleY)>();
  kr.m_normalize = j["normalize"].template get<decltype(kr.m_normalize)>();

  std::string model = j["regmodel"].template get<std::string>();
  kr.m_regmodel = Trend::fromString(model);

  kr.m_optim = j["optim"].template get<decltype(kr.m_optim)>();
  kr.m_objective = j["objective"].template get<decltype(kr.m_objective)>();
  // Auxiliary data (precomputed fit matrices saved by save())
  kr.m_dX = mat_from_json(j["dX"]);
  kr.m_maxdX = colvec_from_json(j["maxdX"]);
  kr.m_F = mat_from_json(j["F"]);
  kr.m_T = mat_from_json(j["T"]);
  kr.m_R = mat_from_json(j["R"]);
  kr.m_M = mat_from_json(j["M"]);
  kr.m_star = mat_from_json(j["star"]);
  kr.m_circ = mat_from_json(j["circ"]);
  kr.m_z = colvec_from_json(j["z"]);
  kr.m_beta = colvec_from_json(j["beta"]);
  kr.m_est_beta = j["est_beta"].template get<decltype(kr.m_est_beta)>();
  kr.m_theta = colvec_from_json(j["theta"]);
  kr.m_est_theta = j["est_theta"].template get<decltype(kr.m_est_theta)>();
  kr.m_sigma2 = j["sigma2"].template get<decltype(kr.m_sigma2)>();
  kr.m_est_sigma2 = j["est_sigma2"].template get<decltype(kr.m_est_sigma2)>();
  // Mark the model as usable (fitted state fully restored)
  kr.m_is_empty = false;

  return kr;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc