• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

Open-Sn / opensn / 23701943051

28 Mar 2026 11:04PM UTC coverage: 76.084% (+0.06%) from 76.027%
23701943051

push

github

web-flow
Merge pull request #991 from wdhawkins/m2d_d2m_ndarray

Convert m2d/d2m storage from std::vector<std::vector<>> to NDArray.

133 of 145 new or added lines in 7 files covered. (91.72%)

3 existing lines in 1 file now uncovered.

21178 of 27835 relevant lines covered (76.08%)

67247084.49 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

72.56
/framework/math/math.cc
1
// SPDX-FileCopyrightText: 2024 The OpenSn Authors <https://open-sn.github.io/opensn/>
2
// SPDX-License-Identifier: MIT
3

4
#include "framework/math/math.h"
5
#include "framework/runtime.h"
6
#include "framework/logging/log.h"
7
#include <cassert>
8
#include <petscmat.h>
9
#include <petscksp.h>
10
#include <petscblaslapack.h>
11
#include <iostream>
12
#include <iomanip>
13
#include <limits>
14

15
namespace opensn
16
{
17

18
double
Factorial(const int x)
{
  // Returns x! as a double; inputs <= 1 yield 1.0. A double accumulator
  // avoids integer overflow, trading exactness beyond 2^53.
  double result = 1.0;
  int k = 2;
  while (k <= x)
  {
    result *= k;
    ++k;
  }
  return result;
}
27

28
std::pair<double, double>
29
OmegaToPhiThetaSafe(const Vector3& omega)
839,857✔
30
{
31
  // Notes: asin maps [-1,+1] to [-pi/2,+pi/2]
32
  //        acos maps [-1,+1] to [0,pi]
33
  // This mapping requires some logic for determining the azimuthal angle.
34
  //
35
  const auto omega_hat = omega.Normalized();
839,857✔
36

37
  double mu = omega_hat.z;
839,857✔
38
  mu = std::min(mu, 1.0);
839,857✔
39
  mu = std::max(mu, -1.0);
839,857✔
40

41
  double theta = acos(mu);
839,857✔
42

43
  // Handling omega aligned to k_hat
44
  if (std::fabs(omega_hat.z) < 1.0e-16)
839,857✔
45
    return {0.0, theta};
×
46

47
  double cos_phi = omega_hat.x / sin(theta);
839,857✔
48
  cos_phi = std::min(cos_phi, 1.0);
839,857✔
49
  cos_phi = std::max(cos_phi, -1.0);
839,857✔
50

51
  // Computing varphi for NE and NW quadrant
52
  if (omega_hat.y >= 0.0)
839,857✔
53
    return {acos(cos_phi), theta};
421,089✔
54
  // Computing varphi for SE and SW quadrant
55
  else
56
    return {2.0 * M_PI - acos(cos_phi), theta};
418,768✔
57
}
58

59
std::vector<std::vector<double>>
Transpose(const std::vector<std::vector<double>>& matrix)
{
  // Returns the transpose of an m x n matrix stored as nested vectors.
  // Throws std::runtime_error on an empty matrix or ragged rows.
  if (matrix.empty())
    throw std::runtime_error("Cannot transpose empty matrix");

  const auto num_rows = matrix.size();
  const auto num_cols = matrix[0].size();

  // Every row must match the first row's length for a transpose to exist.
  for (size_t row = 1; row < num_rows; ++row)
    if (matrix[row].size() != num_cols)
      throw std::runtime_error("Matrix must have consistent row dimensions");

  // Scatter each source row into a column of the n x m result.
  std::vector<std::vector<double>> result(num_cols, std::vector<double>(num_rows));
  for (size_t row = 0; row < num_rows; ++row)
    for (size_t col = 0; col < num_cols; ++col)
      result[col][row] = matrix[row][col];

  return result;
}
94

95
NDArray<double, 2>
Transpose(const NDArray<double, 2>& matrix)
{
  // Returns the transpose of a rank-2 NDArray.
  // Throws std::runtime_error on an empty input or a non-rank-2 shape.
  if (matrix.empty())
    throw std::runtime_error("Cannot transpose empty matrix");

  const auto shape = matrix.dimension();
  if (shape.size() != 2)
    throw std::runtime_error("Transpose requires a rank-2 matrix");

  const auto num_rows = shape[0];
  const auto num_cols = shape[1];
  NDArray<double, 2> result({num_cols, num_rows}, 0.0);

  for (size_t row = 0; row < num_rows; ++row)
    for (size_t col = 0; col < num_cols; ++col)
      result(col, row) = matrix(row, col);

  return result;
}
115

116
std::vector<std::vector<double>>
InvertMatrix(const std::vector<std::vector<double>>& matrix)
{
  // Computes the inverse (strictly, the Moore-Penrose pseudo-inverse) of a
  // square matrix via LAPACK SVD. Throws std::runtime_error if the matrix is
  // empty, non-square, or if the SVD fails to converge.
  const auto n = static_cast<PetscInt>(matrix.size());

  // Reject an empty matrix.
  if (n == 0)
  {
    throw std::runtime_error("Matrix must not be empty for inversion");
  }

  // Verify every row has length n, i.e. the matrix is square.
  for (PetscInt i = 0; i < n; ++i)
  {
    if (std::cmp_not_equal(matrix[i].size(), n))
    {
      throw std::runtime_error("Matrix must be square for inversion");
    }
  }

  // Use SVD-based pseudo-inverse for numerical stability.
  // This is more robust than LU factorization for ill-conditioned matrices.

  // Create working arrays for LAPACK SVD (dgesvd)
  // A = U * S * V^T, so A^(-1) = V * S^(-1) * U^T
  std::vector<double> A_col_major(n * n);
  std::vector<double> U(n * n);
  std::vector<double> VT(n * n);
  std::vector<double> S(n);

  // Copy matrix to column-major format, as LAPACK (Fortran) expects.
  for (PetscInt j = 0; j < n; ++j)
  {
    for (PetscInt i = 0; i < n; ++i)
    {
      A_col_major[j * n + i] = matrix[i][j];
    }
  }

  // Call LAPACK dgesvd for SVD decomposition
  // jobu = 'A' means compute all columns of U
  // jobvt = 'A' means compute all rows of V^T
  char jobu = 'A';
  char jobvt = 'A';
  auto n_blas = static_cast<PetscBLASInt>(n);
  // 5n workspace satisfies dgesvd's documented minimum for a square input.
  PetscBLASInt lwork = std::max(PetscBLASInt(1), 5 * n_blas);
  std::vector<double> work(lwork);
  PetscBLASInt info_svd = 0;

  // LAPACK SVD: dgesvd (overwrites A_col_major; fills S, U, VT)
  LAPACKgesvd_(&jobu,
               &jobvt,
               &n_blas,
               &n_blas,
               A_col_major.data(),
               &n_blas,
               S.data(),
               U.data(),
               &n_blas,
               VT.data(),
               &n_blas,
               work.data(),
               &lwork,
               &info_svd);

  if (info_svd != 0)
  {
    throw std::runtime_error("SVD decomposition failed with LAPACK error code: " +
                             std::to_string(info_svd));
  }

  // dgesvd returns singular values in descending order, so S[0] and S[n-1]
  // are the extremes.
  double sigma_max = S[0];
  double sigma_min = S[n - 1];

  // Use machine epsilon scaled by matrix size and largest singular value as
  // the truncation tolerance — the standard choice for pseudo-inverses.
  const double machine_eps = std::numeric_limits<double>::epsilon();
  const double tol = static_cast<double>(n) * machine_eps * sigma_max;

  // Log condition number for debugging ill-conditioned matrices
  double condition_number = (sigma_min > tol) ? (sigma_max / sigma_min) : INFINITY;
  if (condition_number > 1.0e10)
  {
    log.Log0Warning() << "InvertMatrix: Matrix is ill-conditioned (condition number = "
                      << std::scientific << std::setprecision(3) << condition_number << ")";
  }

  // Compute pseudo-inverse: A^(-1) = V * S^(-1) * U^T.
  // Since we have V^T from the SVD, V[i][k] = VT[k][i]:
  //   Result[i][j] = sum_k V[i][k] * (1/S[k]) * U[j][k]
  //                = sum_k VT[k][i] * (1/S[k]) * U[j][k]
  // In column-major storage: VT[k][i] -> VT[i*n + k], U[j][k] -> U[k*n + j].
  std::vector<std::vector<double>> inverse(n, std::vector<double>(n, 0.0));

  for (PetscInt i = 0; i < n; ++i)
  {
    for (PetscInt j = 0; j < n; ++j)
    {
      double sum = 0.0;
      for (PetscInt k = 0; k < n; ++k)
      {
        // Only include contributions from singular values above tolerance;
        // values at or below it are treated as exactly zero.
        if (S[k] > tol)
        {
          double v_ik = VT[k + i * n];
          double u_jk = U[j + k * n];
          sum += v_ik * (1.0 / S[k]) * u_jk;
        }
      }
      inverse[i][j] = sum;
    }
  }

  return inverse;
}
232

233
NDArray<double, 2>
InvertMatrix(const NDArray<double, 2>& matrix)
{
  // Computes the inverse (strictly, the Moore-Penrose pseudo-inverse) of a
  // square rank-2 NDArray via LAPACK SVD. Throws std::runtime_error if the
  // matrix is empty, non-square, or if the SVD fails to converge.
  if (matrix.empty())
    throw std::runtime_error("Matrix must not be empty for inversion");

  const auto dims = matrix.dimension();
  if (dims.size() != 2 or dims[0] != dims[1])
    throw std::runtime_error("Matrix must be square for inversion");

  const size_t n = dims[0];

  // Working arrays for LAPACK dgesvd: A = U * S * V^T.
  std::vector<double> A_col_major(n * n);
  std::vector<double> U(n * n);
  std::vector<double> VT(n * n);
  std::vector<double> S(n);

  // Copy to column-major order, as LAPACK (Fortran) expects.
  for (size_t j = 0; j < n; ++j)
    for (size_t i = 0; i < n; ++i)
      A_col_major[j * n + i] = matrix(i, j);

  // jobu/jobvt = 'A': compute all columns of U and all rows of V^T.
  char jobu = 'A';
  char jobvt = 'A';
  auto n_blas = static_cast<PetscBLASInt>(n);
  // 5n workspace satisfies dgesvd's documented minimum for a square input.
  PetscBLASInt lwork = std::max(PetscBLASInt(1), 5 * n_blas);
  std::vector<double> work(lwork);
  PetscBLASInt info_svd = 0;

  // LAPACK SVD: dgesvd (overwrites A_col_major; fills S, U, VT)
  LAPACKgesvd_(&jobu,
               &jobvt,
               &n_blas,
               &n_blas,
               A_col_major.data(),
               &n_blas,
               S.data(),
               U.data(),
               &n_blas,
               VT.data(),
               &n_blas,
               work.data(),
               &lwork,
               &info_svd);

  if (info_svd != 0)
    throw std::runtime_error("SVD decomposition failed with LAPACK error code: " +
                             std::to_string(info_svd));

  // Singular values come back in descending order; the truncation tolerance
  // n * eps * sigma_max is the standard pseudo-inverse choice.
  const auto sigma_max = S[0];
  const auto sigma_min = S[n - 1];
  const auto machine_eps = std::numeric_limits<double>::epsilon();
  const auto tol = static_cast<double>(n) * machine_eps * sigma_max;

  // Warn on ill-conditioned input to aid debugging.
  const double condition_number = (sigma_min > tol) ? (sigma_max / sigma_min) : INFINITY;
  if (condition_number > 1.0e10)
  {
    log.Log0Warning() << "InvertMatrix: Matrix is ill-conditioned (condition number = "
                      << std::scientific << std::setprecision(3) << condition_number << ")";
  }

  // Pseudo-inverse: A^(-1) = V * S^(-1) * U^T, with V[i][k] = VT[k][i].
  // In column-major storage: VT[k][i] -> VT[i*n + k], U[j][k] -> U[k*n + j].
  NDArray<double, 2> inverse({n, n}, 0.0);
  for (size_t i = 0; i < n; ++i)
  {
    for (size_t j = 0; j < n; ++j)
    {
      double sum = 0.0;
      for (size_t k = 0; k < n; ++k)
      {
        // Singular values at or below the tolerance are treated as zero.
        if (S[k] > tol)
        {
          const auto v_ik = VT[k + i * n];
          const auto u_jk = U[j + k * n];
          sum += v_ik * (1.0 / S[k]) * u_jk;
        }
      }
      inverse(i, j) = sum;
    }
  }

  return inverse;
}
313

314
std::vector<std::vector<double>>
OrthogonalizeMatrixSpan(const std::vector<std::vector<double>>& matrix,
                        const std::vector<double>& weights)
{
  // Orthonormalizes the columns of an m x n matrix under the weighted inner
  // product <a, b> = sum_r weights[r] * a[r] * b[r], using modified
  // Gram-Schmidt with a re-orthogonalization pass for numerical stability.
  // Throws std::runtime_error on an empty matrix, ragged rows, or a weight
  // vector whose length differs from the number of rows.
  if (matrix.empty())
  {
    throw std::runtime_error("Cannot orthogonalize empty matrix");
  }

  const size_t m = matrix.size();
  const size_t n = matrix[0].size();

  // Verify rectangular matrix
  for (size_t i = 1; i < m; ++i)
  {
    if (matrix[i].size() != n)
    {
      throw std::runtime_error("Matrix must have consistent column dimensions");
    }
  }

  // One weight per row is required; matches the check in the NDArray overload
  // and prevents an out-of-bounds read below.
  if (weights.size() != m)
  {
    throw std::runtime_error("OrthogonalizeMatrixSpan weight size mismatch");
  }

  // Create working copy of the matrix - we will orthogonalize its columns in place
  std::vector<std::vector<double>> Q = matrix;

  // Orthogonalize columns
  for (size_t j = 0; j < n; ++j)
  {
    // Two passes: the second removes components that floating-point
    // cancellation re-introduced during the first.
    for (int pass = 0; pass < 2; ++pass)
    {
      // Orthogonalize column j against all previous columns
      for (size_t i = 0; i < j; ++i)
      {
        // Compute weighted inner product <Q[:,i], Q[:,j]>
        double dot_product = 0.0;
        for (size_t row = 0; row < m; ++row)
        {
          dot_product += weights[row] * Q[row][i] * Q[row][j];
        }

        // Subtract projection: Q[:,j] -= dot_product * Q[:,i]
        for (size_t row = 0; row < m; ++row)
        {
          Q[row][j] -= dot_product * Q[row][i];
        }
      }
    }

    // Normalize column j under weighted inner product
    double norm_squared = 0.0;
    for (size_t row = 0; row < m; ++row)
    {
      norm_squared += weights[row] * Q[row][j] * Q[row][j];
    }
    double norm = std::sqrt(norm_squared);

    // Near-null columns (norm below 1e-14) are left unscaled to avoid
    // dividing by (near-)zero.
    if (norm > 1e-14)
    {
      for (size_t row = 0; row < m; ++row)
      {
        Q[row][j] /= norm;
      }
    }
  }
  return Q;
}
381

382
NDArray<double, 2>
OrthogonalizeMatrixSpan(const NDArray<double, 2>& matrix, const std::vector<double>& weights)
{
  // Orthonormalizes the columns of a rank-2 NDArray under the weighted inner
  // product <a, b> = sum_r weights[r] * a(r) * b(r), using modified
  // Gram-Schmidt with a re-orthogonalization pass for numerical stability.
  if (matrix.empty())
    throw std::runtime_error("Cannot orthogonalize empty matrix");

  const auto shape = matrix.dimension();
  if (shape.size() != 2)
    throw std::runtime_error("OrthogonalizeMatrixSpan requires a rank-2 matrix");

  const size_t num_rows = shape[0];
  const size_t num_cols = shape[1];
  if (weights.size() != num_rows)
    throw std::runtime_error("OrthogonalizeMatrixSpan weight size mismatch");

  // Work on a copy; columns are orthogonalized in place.
  NDArray<double, 2> basis(matrix);

  for (size_t col = 0; col < num_cols; ++col)
  {
    // Two projection sweeps: the second removes components that
    // floating-point cancellation re-introduced during the first.
    for (int sweep = 0; sweep < 2; ++sweep)
    {
      for (size_t prev = 0; prev < col; ++prev)
      {
        // Weighted projection of the current column onto an earlier one.
        double proj = 0.0;
        for (size_t row = 0; row < num_rows; ++row)
          proj += weights[row] * basis(row, prev) * basis(row, col);

        for (size_t row = 0; row < num_rows; ++row)
          basis(row, col) -= proj * basis(row, prev);
      }
    }

    // Normalize under the weighted norm; near-null columns stay unscaled.
    double norm_sq = 0.0;
    for (size_t row = 0; row < num_rows; ++row)
      norm_sq += weights[row] * basis(row, col) * basis(row, col);
    const double col_norm = std::sqrt(norm_sq);

    if (col_norm > 1e-14)
      for (size_t row = 0; row < num_rows; ++row)
        basis(row, col) /= col_norm;
  }

  return basis;
}
426

427
void
PrintVector(const std::vector<double>& x)
{
  // Writes the entries of x to stdout, space separated, then a flushed newline.
  for (size_t i = 0; i < x.size(); ++i)
    std::cout << x[i] << ' ';
  std::cout << std::endl;
}
434

435
void
Scale(std::vector<double>& x, const double& val)
{
  // Multiplies every entry of x by val, in place.
  for (size_t i = 0; i < x.size(); ++i)
    x[i] *= val;
}
441

442
void
Set(std::vector<double>& x, const double& val)
{
  // Assigns val to every entry of x, in place.
  for (size_t i = 0; i < x.size(); ++i)
    x[i] = val;
}
448

449
double
Dot(const std::vector<double>& x, const std::vector<double>& y)
{
  // Inner product of two equally sized, non-empty vectors (asserted in
  // debug builds only).
  assert(!x.empty());
  assert(!y.empty());
  assert(x.size() == y.size());

  double accum = 0.0;
  for (size_t i = 0; i < x.size(); ++i)
    accum += x[i] * y[i];

  return accum;
}
465

466
std::vector<double>
Mult(const std::vector<double>& x, const double& val)
{
  // Returns val * x as a new vector; x is left unchanged.
  std::vector<double> scaled(x.size());
  for (size_t i = 0; i < scaled.size(); ++i)
    scaled[i] = val * x[i];

  return scaled;
}
477

478
double
L1Norm(const std::vector<double>& x)
{
  // Sum of absolute values; returns 0.0 for an empty vector.
  double sum = 0.0;
  for (const double& xi : x)
    sum += std::fabs(xi);

  return sum;
}
490

491
double
L2Norm(const std::vector<double>& x)
{
  // Euclidean norm sqrt(sum x_i^2); returns 0.0 for an empty vector.
  double sum_sq = 0.0;
  for (const double& xi : x)
    sum_sq += xi * xi;

  return std::sqrt(sum_sq);
}
503

504
double
LInfNorm(const std::vector<double>& x)
{
  // L-infinity (maximum absolute value) norm; returns 0.0 for an empty
  // vector.
  double norm = 0.0;
  for (const double& xi : x)
    // Fixed: previously `val += std::max(...)`, which summed the running
    // maxima (e.g. {1, 1} gave 2 instead of 1) rather than taking the max.
    norm = std::max(std::fabs(xi), norm);

  return norm;
}
516

517
double
LpNorm(const std::vector<double>& x, const double& p)
{
  // (sum |x_i|^p)^(1/p); the caller is responsible for supplying p != 0.
  double accum = 0.0;
  for (const double& xi : x)
    accum += std::pow(std::fabs(xi), p);

  return std::pow(accum, 1.0 / p);
}
529

530
std::vector<double>
operator+(const std::vector<double>& a, const std::vector<double>& b)
{
  // Element-wise sum; both operands must have equal length (debug-asserted).
  assert(a.size() == b.size());

  const size_t len = a.size();
  std::vector<double> sum(len, 0.0);
  for (size_t i = 0; i < len; ++i)
    sum[i] = a[i] + b[i];

  return sum;
}
541

542
std::vector<double>
operator-(const std::vector<double>& a, const std::vector<double>& b)
{
  // Element-wise difference; both operands must have equal length
  // (debug-asserted).
  assert(a.size() == b.size());

  const size_t len = a.size();
  std::vector<double> diff(len, 0.0);
  for (size_t i = 0; i < len; ++i)
    diff[i] = a[i] - b[i];

  return diff;
}
553

554
double
555
ComputeL2Change(std::vector<double>& x, std::vector<double>& y)
×
556
{
557
  assert(x.size() == y.size());
×
558

559
  double norm = 0.0;
×
560
  for (auto i = 0; i < x.size(); ++i)
×
561
  {
562
    double val = x[i] - y[i];
×
563
    norm += val * val;
×
564
  }
565

566
  double global_norm = 0.0;
×
567
  mpi_comm.all_reduce<double>(norm, global_norm, mpi::op::sum<double>());
×
568

569
  return std::sqrt(global_norm);
×
570
}
571

572
double
573
ComputePointwiseChange(std::vector<double>& x, std::vector<double>& y)
148,452✔
574
{
575
  assert(x.size() == y.size());
148,452✔
576

577
  double pw_change = 0.0;
148,452✔
578
  for (auto i = 0; i < x.size(); ++i)
102,010,868✔
579
  {
580
    double max = std::max(x[i], y[i]);
101,862,416✔
581
    double delta = std::fabs(x[i] - y[i]);
101,862,416✔
582
    if (max >= std::numeric_limits<double>::min())
101,862,416✔
583
      pw_change = std::max(delta / max, pw_change);
79,080,716✔
584
    else
585
      pw_change = std::max(delta, pw_change);
22,781,700✔
586
  }
587

588
  double global_pw_change = 0.0;
148,452✔
589
  mpi_comm.all_reduce<double>(pw_change, global_pw_change, mpi::op::max<double>());
148,452✔
590

591
  return global_pw_change;
148,452✔
592
}
593

594
} // namespace opensn
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc