• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 22261613902

21 Feb 2026 06:02PM UTC coverage: 81.808% (+0.05%) from 81.763%
22261613902

Pull #3474

github

web-flow
Merge aea47008a into 139907c95
Pull Request #3474: Support arbitrary symmetry axis for CylindricalIndependent class

17351 of 24448 branches covered (70.97%)

Branch coverage included in aggregate %.

50 of 64 new or added lines in 2 files covered. (78.13%)

2915 existing lines in 63 files now uncovered.

57697 of 67289 relevant lines covered (85.75%)

45481208.67 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

72.28
/src/distribution.cpp
1
#include "openmc/distribution.h"
2

3
#include <algorithm> // for copy
4
#include <array>
5
#include <cmath>     // for sqrt, floor, max
6
#include <iterator>  // for back_inserter
7
#include <numeric>   // for accumulate
8
#include <stdexcept> // for runtime_error
9
#include <string>    // for string, stod
10

11
#include "openmc/constants.h"
12
#include "openmc/error.h"
13
#include "openmc/math_functions.h"
14
#include "openmc/random_dist.h"
15
#include "openmc/random_lcg.h"
16
#include "openmc/xml_interface.h"
17

18
namespace openmc {
19

20
//==============================================================================
21
// Helper function for computing importance weights from biased sampling
22
//==============================================================================
23

24
//! Compute per-bin importance weights w_i = p_i / b_i after normalizing both
//! discrete densities to unit sum. Bins the bias never samples (b_i == 0)
//! receive an infinite weight.
vector<double> compute_importance_weights(
  const vector<double>& p, const vector<double>& b)
{
  const std::size_t n = p.size();

  // Totals used to normalize each density
  const double p_total = std::accumulate(p.begin(), p.end(), 0.0);
  const double b_total = std::accumulate(b.begin(), b.end(), 0.0);

  vector<double> weights(n);
  for (std::size_t i = 0; i < n; ++i) {
    const double p_i = p[i] / p_total;
    const double b_i = b[i] / b_total;
    // Zero bias probability means the bin can never be sampled under the
    // bias, so its weight is infinite
    weights[i] = (b_i == 0.0) ? INFTY : p_i / b_i;
  }
  return weights;
}
50

51
std::pair<double, double> Distribution::sample(uint64_t* seed) const
779,916,725✔
52
{
53
  if (bias_) {
779,916,725✔
54
    // Sample from the bias distribution and compute importance weight
55
    double val = bias_->sample_unbiased(seed);
650,029✔
56
    double wgt = this->evaluate(val) / bias_->evaluate(val);
650,029✔
57
    return {val, wgt};
650,029✔
58
  } else {
59
    // Unbiased sampling: return sampled value with weight 1.0
60
    double val = sample_unbiased(seed);
779,266,696✔
61
    return {val, 1.0};
779,266,696✔
62
  }
63
}
64

65
// PDF evaluation not supported for all distribution types
double Distribution::evaluate(double x) const
{
  // Base-class fallback: subclasses that support PDF evaluation (needed for
  // biased sampling weights) override this. Reaching it means a bias was
  // attached to a distribution type without a PDF implementation.
  throw std::runtime_error(
    "PDF evaluation not implemented for this distribution type.");
}
71

72
//! Attach an optional biasing distribution described by a <bias> child node.
//! Nested biases (a bias with its own <bias>) are rejected.
void Distribution::read_bias_from_xml(pugi::xml_node node)
{
  // Nothing to do when no <bias> element is present
  if (!check_for_node(node, "bias"))
    return;

  pugi::xml_node bias_node = node.child("bias");

  // Refuse recursively biased distributions
  if (check_for_node(bias_node, "bias")) {
    openmc::fatal_error(
      "Distribution has a bias distribution with its own bias distribution. "
      "Please ensure bias distributions do not have their own bias.");
  }

  this->set_bias(distribution_from_xml(bias_node));
}
87

88
//==============================================================================
89
// DiscreteIndex implementation
90
//==============================================================================
91

92
DiscreteIndex::DiscreteIndex(pugi::xml_node node)
{
  // "parameters" holds n values followed by n probabilities; only the
  // probability half is needed for index sampling
  auto params = get_node_array<double>(node, "parameters");
  const std::size_t half = params.size() / 2;
  assign({params.data() + half, half});
}
99

100
// Construct directly from a span of (possibly unnormalized) probabilities
DiscreteIndex::DiscreteIndex(span<const double> p)
{
  assign(p);
}
104

105
// Replace the stored probabilities and rebuild the alias-sampling tables
void DiscreteIndex::assign(span<const double> p)
{
  prob_.assign(p.begin(), p.end());
  this->init_alias();
}
110

111
// Build the alias table (prob_ and alias_) enabling O(1) sampling of the
// discrete distribution in sample().
void DiscreteIndex::init_alias()
{
  normalize();

  // The initialization and sampling method is based on Vose
  // (DOI: 10.1109/32.92917)
  // Vectors for large and small probabilities based on 1/n
  vector<size_t> large;
  vector<size_t> small;

  size_t n = prob_.size();

  // Set and allocate memory
  alias_.assign(n, 0);

  // Fill large and small vectors based on 1/n. After scaling by n, an entry
  // equal to exactly 1/n (prob_[i] == 1.0) lands in 'small'; that is fine
  // because it fills its bin completely and its alias is never taken.
  for (size_t i = 0; i < n; i++) {
    prob_[i] *= n;
    if (prob_[i] > 1.0) {
      large.push_back(i);
    } else {
      small.push_back(i);
    }
  }

  // Pair each small bin with a large donor until one list is exhausted
  while (!large.empty() && !small.empty()) {
    int j = small.back();
    int k = large.back();

    // Remove last element of small
    small.pop_back();

    // Update probability and alias based on Vose's algorithm: bin j is
    // topped up to 1.0 by donor k, which keeps the remainder
    prob_[k] += prob_[j] - 1.0;
    alias_[j] = k;

    // Move large index to small vector, if it is no longer large
    if (prob_[k] < 1.0) {
      small.push_back(k);
      large.pop_back();
    }
  }
  // Any indices left in either list have prob_ ~= 1.0 up to roundoff, so
  // their (default 0) alias entry is effectively never consulted.
}
154

155
size_t DiscreteIndex::sample(uint64_t* seed) const
63,629,854✔
156
{
157
  // Alias sampling of discrete distribution
158
  size_t n = prob_.size();
63,629,854✔
159
  if (n > 1) {
63,629,854✔
160
    size_t u = prn(seed) * n;
16,202,266✔
161
    if (prn(seed) < prob_[u]) {
16,202,266✔
162
      return u;
9,631,040✔
163
    } else {
164
      return alias_[u];
6,571,226✔
165
    }
166
  } else {
167
    return 0;
47,427,588✔
168
  }
169
}
170

171
void DiscreteIndex::normalize()
79,642✔
172
{
173
  // Renormalize density function so that it sums to unity. Note that we save
174
  // the integral of the distribution so that if it is used as part of another
175
  // distribution (e.g., Mixture), we know its relative strength.
176
  integral_ = std::accumulate(prob_.begin(), prob_.end(), 0.0);
79,642✔
177
  for (auto& p_i : prob_) {
1,521,507✔
178
    p_i /= integral_;
1,441,865✔
179
  }
180
}
79,642✔
181

182
//==============================================================================
183
// Discrete implementation
184
//==============================================================================
185

186
Discrete::Discrete(pugi::xml_node node)
{
  // "parameters" stores the n x-values followed by their n probabilities
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;
  x_.assign(params.begin(), params.begin() + n);
  const double* prob_begin = params.data() + n;

  if (!check_for_node(node, "bias")) {
    // Unbiased case: weight_ stays empty
    di_.assign({prob_begin, n});
    return;
  }

  // Biased case: the <bias> array must match the number of probabilities
  auto bias_params = get_node_array<double>(node, "bias");
  if (bias_params.size() != n) {
    openmc::fatal_error(
      "Size mismatch: Attempted to bias Discrete distribution with " +
      std::to_string(n) + " probability entries using a bias with " +
      std::to_string(bias_params.size()) +
      " entries. Please ensure distributions have the same size.");
  }

  // Importance weights restore the true distribution in expectation
  vector<double> true_probs(prob_begin, prob_begin + n);
  weight_ = compute_importance_weights(true_probs, bias_params);

  // Sample indices according to the bias probabilities
  di_.assign(bias_params);
}
218

219
// Construct from parallel arrays of n values and n probabilities
Discrete::Discrete(const double* x, const double* p, size_t n) : di_({p, n})
{
  x_.assign(x, x + n);
}
223

224
std::pair<double, double> Discrete::sample(uint64_t* seed) const
57,710,809✔
225
{
226
  size_t idx = di_.sample(seed);
57,710,809✔
227
  double wgt = weight_.empty() ? 1.0 : weight_[idx];
57,710,809✔
228
  return {x_[idx], wgt};
57,710,809✔
229
}
230

231
double Discrete::sample_unbiased(uint64_t* seed) const
×
232
{
233
  size_t idx = di_.sample(seed);
×
234
  return x_[idx];
×
235
}
236

237
//==============================================================================
238
// Uniform implementation
239
//==============================================================================
240

241
Uniform::Uniform(pugi::xml_node node)
{
  // Expect exactly two parameters: the lower and upper bounds
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2) {
    fatal_error("Uniform distribution must have two "
                "parameters specified.");
  }

  a_ = params[0];
  b_ = params[1];

  // An optional <bias> child installs a biasing distribution
  read_bias_from_xml(node);
}
254

255
double Uniform::sample_unbiased(uint64_t* seed) const
726,221✔
256
{
257
  return a_ + prn(seed) * (b_ - a_);
726,221✔
258
}
259

260
double Uniform::evaluate(double x) const
350,029✔
261
{
262
  if (x <= a()) {
350,029!
263
    return 0.0;
×
264
  } else if (x >= b()) {
350,029!
265
    return 0.0;
×
266
  } else {
267
    return 1 / (b() - a());
350,029✔
268
  }
269
}
270

271
//==============================================================================
272
// PowerLaw implementation
273
//==============================================================================
274

275
PowerLaw::PowerLaw(pugi::xml_node node)
{
  // Expect three parameters: lower bound a, upper bound b, and exponent n
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 3) {
    fatal_error("PowerLaw distribution must have three "
                "parameters specified.");
  }

  const double lower = params.at(0);
  const double upper = params.at(1);
  const double exponent = params.at(2);

  // Cache quantities for inverse-CDF sampling:
  // x = (offset + xi * span)^(1/(n+1))
  offset_ = std::pow(lower, exponent + 1);
  span_ = std::pow(upper, exponent + 1) - offset_;
  ninv_ = 1 / (exponent + 1);

  // An optional <bias> child installs a biasing distribution
  read_bias_from_xml(node);
}
293

294
double PowerLaw::evaluate(double x) const
250,029✔
295
{
296
  if (x <= a()) {
250,029!
297
    return 0.0;
×
298
  } else if (x >= b()) {
250,029!
299
    return 0.0;
×
300
  } else {
301
    int pwr = n() + 1;
250,029✔
302
    double norm = pwr / span_;
250,029✔
303
    return norm * std::pow(std::fabs(x), n());
250,029✔
304
  }
305
}
306

307
double PowerLaw::sample_unbiased(uint64_t* seed) const
252,049✔
308
{
309
  return std::pow(offset_ + prn(seed) * span_, ninv_);
252,049✔
310
}
311

312
//==============================================================================
313
// Maxwell implementation
314
//==============================================================================
315

316
Maxwell::Maxwell(pugi::xml_node node)
{
  // Single parameter: the effective temperature theta
  std::string value = get_node_value(node, "parameters");
  theta_ = std::stod(value);

  // An optional <bias> child installs a biasing distribution
  read_bias_from_xml(node);
}
322

323
// Sample an energy from the Maxwellian spectrum with temperature theta_
double Maxwell::sample_unbiased(uint64_t* seed) const
{
  return maxwell_spectrum(theta_, seed);
}
327

328
double Maxwell::evaluate(double x) const
200,000✔
329
{
330
  double c = (2.0 / SQRT_PI) * std::pow(theta_, -1.5);
200,000✔
331
  return c * std::sqrt(x) * std::exp(-x / theta_);
200,000✔
332
}
333

334
//==============================================================================
335
// Watt implementation
336
//==============================================================================
337

338
Watt::Watt(pugi::xml_node node)
{
  // Expect two parameters, a and b, of the Watt fission spectrum
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2)
    openmc::fatal_error("Watt energy distribution must have two "
                        "parameters specified.");

  a_ = params[0];
  b_ = params[1];

  // An optional <bias> child installs a biasing distribution
  read_bias_from_xml(node);
}
350

351
// Sample an energy from the Watt fission spectrum with parameters a_ and b_
double Watt::sample_unbiased(uint64_t* seed) const
{
  return watt_spectrum(a_, b_, seed);
}
355

356
double Watt::evaluate(double x) const
200,000✔
357
{
358
  double c =
359
    2.0 / (std::sqrt(PI * b_) * std::pow(a_, 1.5) * std::exp(a_ * b_ / 4.0));
200,000✔
360
  return c * std::exp(-x / a_) * std::sinh(std::sqrt(b_ * x));
200,000✔
361
}
362

363
//==============================================================================
364
// Normal implementation
365
//==============================================================================
366

367
// Construct a (possibly truncated) normal distribution directly from its
// mean, standard deviation, and truncation bounds
Normal::Normal(double mean_value, double std_dev, double lower, double upper)
  : mean_value_ {mean_value}, std_dev_ {std_dev}, lower_ {lower}, upper_ {upper}
{
  compute_normalization();
}
372

373
Normal::Normal(pugi::xml_node node)
{
  // Either (mean, std_dev) or (mean, std_dev, lower, upper)
  auto params = get_node_array<double>(node, "parameters");
  const std::size_t n_params = params.size();
  if (n_params != 2 && n_params != 4) {
    openmc::fatal_error("Normal energy distribution must have two "
                        "parameters (mean, std_dev) or four parameters "
                        "(mean, std_dev, lower, upper) specified.");
  }

  mean_value_ = params.at(0);
  std_dev_ = params.at(1);

  // Truncation bounds default to the whole real line
  lower_ = (n_params == 4) ? params.at(2) : -INFTY;
  upper_ = (n_params == 4) ? params.at(3) : INFTY;

  compute_normalization();
  read_bias_from_xml(node);
}
397

398
void Normal::compute_normalization()
100✔
399
{
400
  // Validate bounds
401
  if (lower_ >= upper_) {
100!
UNCOV
402
    openmc::fatal_error(
×
403
      "Normal distribution lower bound must be less than upper bound.");
404
  }
405

406
  // Check if truncation bounds are finite
407
  is_truncated_ = (lower_ > -INFTY || upper_ < INFTY);
100✔
408

409
  if (is_truncated_) {
100✔
410
    double alpha = (lower_ - mean_value_) / std_dev_;
50✔
411
    double beta = (upper_ - mean_value_) / std_dev_;
50✔
412
    double cdf_diff = standard_normal_cdf(beta) - standard_normal_cdf(alpha);
50✔
413

414
    if (cdf_diff <= 0.0) {
50!
UNCOV
415
      openmc::fatal_error(
×
416
        "Normal distribution truncation bounds exclude entire distribution.");
417
    }
418
    norm_factor_ = 1.0 / cdf_diff;
50✔
419
  } else {
420
    norm_factor_ = 1.0;
50✔
421
  }
422
}
100✔
423

424
double Normal::sample_unbiased(uint64_t* seed) const
300,000✔
425
{
426
  if (!is_truncated_) {
300,000✔
427
    return normal_variate(mean_value_, std_dev_, seed);
200,000✔
428
  }
429

430
  // Rejection sampling for truncated normal
431
  double x;
432
  do {
433
    x = normal_variate(mean_value_, std_dev_, seed);
146,590✔
434
  } while (x < lower_ || x > upper_);
146,590✔
435
  return x;
100,000✔
436
}
437

438
double Normal::evaluate(double x) const
200,110✔
439
{
440
  // Return 0 outside truncation bounds
441
  if (x < lower_ || x > upper_) {
200,110✔
442
    return 0.0;
40✔
443
  }
444

445
  // Standard normal PDF value
446
  double pdf = (1.0 / (std::sqrt(2.0 * PI) * std_dev_)) *
200,070✔
447
               std::exp(-std::pow((x - mean_value_), 2.0) /
200,070✔
448
                        (2.0 * std::pow(std_dev_, 2.0)));
200,070✔
449

450
  // Apply normalization for truncation
451
  return pdf * norm_factor_;
200,070✔
452
}
453

454
//==============================================================================
455
// Tabular implementation
456
//==============================================================================
457

458
Tabular::Tabular(pugi::xml_node node)
{
  // Interpolation scheme defaults to histogram when unspecified
  interp_ = Interpolation::histogram;
  if (check_for_node(node, "interpolation")) {
    std::string temp = get_node_value(node, "interpolation");
    if (temp == "histogram") {
      interp_ = Interpolation::histogram;
    } else if (temp == "linear-linear") {
      interp_ = Interpolation::lin_lin;
    } else if (temp == "log-linear") {
      interp_ = Interpolation::log_lin;
    } else if (temp == "log-log") {
      interp_ = Interpolation::log_log;
    } else {
      openmc::fatal_error(
        "Unsupported interpolation type for distribution: " + temp);
    }
  }

  // Read and initialize tabular distribution. If number of parameters is
  // odd, pad with a zero so the 'p' half is complete.
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() % 2 != 0) {
    params.push_back(0.0);
  }
  std::size_t n = params.size() / 2;
  init(params.data(), params.data() + n, n);

  read_bias_from_xml(node);
}
491

492
// Construct from raw arrays of n x-values and probabilities, with an
// optional precomputed CDF c, using the given interpolation rule
Tabular::Tabular(const double* x, const double* p, int n, Interpolation interp,
  const double* c)
  : interp_ {interp}
{
  init(x, p, n, c);
}
498

499
// Store the tabulated density, build (or copy) its CDF according to the
// interpolation scheme, and normalize both so the CDF ends at 1.
void Tabular::init(
  const double* x, const double* p, std::size_t n, const double* c)
{
  // Copy x/p arrays into vectors
  std::copy(x, x + n, std::back_inserter(x_));
  std::copy(p, p + n, std::back_inserter(p_));

  // Calculate cumulative distribution function. If a precomputed CDF was
  // supplied, copy it; otherwise integrate p over each interval using the
  // rule matching the interpolation scheme.
  if (c) {
    std::copy(c, c + n, std::back_inserter(c_));
  } else {
    c_.resize(n);
    c_[0] = 0.0;
    for (int i = 1; i < n; ++i) {
      if (interp_ == Interpolation::histogram) {
        // Rectangle rule: p is constant over [x_{i-1}, x_i]
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::lin_lin) {
        // Trapezoid rule for linearly varying p
        c_[i] = c_[i - 1] + 0.5 * (p_[i - 1] + p_[i]) * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::log_lin) {
        // Integral of the log-linear density; exprel keeps the expression
        // stable as the log-slope m approaches zero
        double m = std::log(p_[i] / p_[i - 1]) / (x_[i] - x_[i - 1]);
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]) *
                              exprel(m * (x_[i] - x_[i - 1]));
      } else if (interp_ == Interpolation::log_log) {
        // Integral of the log-log interpolated density, again using exprel
        // for numerical stability near m = 0
        double m = std::log((x_[i] * p_[i]) / (x_[i - 1] * p_[i - 1])) /
                   std::log(x_[i] / x_[i - 1]);
        c_[i] = c_[i - 1] + x_[i - 1] * p_[i - 1] *
                              std::log(x_[i] / x_[i - 1]) *
                              exprel(m * std::log(x_[i] / x_[i - 1]));
      } else {
        UNREACHABLE();
      }
    }
  }

  // Normalize density and distribution functions. Note that we save the
  // integral of the distribution so that if it is used as part of another
  // distribution (e.g., Mixture), we know its relative strength.
  integral_ = c_[n - 1];
  for (int i = 0; i < n; ++i) {
    p_[i] = p_[i] / integral_;
    c_[i] = c_[i] / integral_;
  }
}
542

543
// Sample a value by inverting the tabulated CDF within the bin containing a
// uniformly sampled CDF value, using the bin's interpolation rule.
double Tabular::sample_unbiased(uint64_t* seed) const
{
  // Sample value of CDF
  double c = prn(seed);

  // Find first CDF bin which is above the sampled value (linear scan; c_i
  // tracks the CDF value at the left edge of the located bin)
  double c_i = c_[0];
  int i;
  std::size_t n = c_.size();
  for (i = 0; i < n - 1; ++i) {
    if (c <= c_[i + 1])
      break;
    c_i = c_[i + 1];
  }

  // Determine bounding PDF values
  double x_i = x_[i];
  double p_i = p_[i];

  if (interp_ == Interpolation::histogram) {
    // Histogram interpolation: CDF is linear inside the bin
    if (p_i > 0.0) {
      return x_i + (c - c_i) / p_i;
    } else {
      return x_i;
    }
  } else if (interp_ == Interpolation::lin_lin) {
    // Linear-linear interpolation: invert the quadratic CDF within the bin
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = (p_i1 - p_i) / (x_i1 - x_i);
    if (m == 0.0) {
      // Flat density across the bin reduces to the histogram case
      return x_i + (c - c_i) / p_i;
    } else {
      // max() guards against a slightly negative discriminant from roundoff
      return x_i +
             (std::sqrt(std::max(0.0, p_i * p_i + 2 * m * (c - c_i))) - p_i) /
               m;
    }
  } else if (interp_ == Interpolation::log_lin) {
    // Log-linear interpolation; log1prel keeps the inversion stable as the
    // log-slope m approaches zero
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = std::log(p_i1 / p_i) / (x_i1 - x_i);
    double f = (c - c_i) / p_i;
    return x_i + f * log1prel(m * f);
  } else if (interp_ == Interpolation::log_log) {
    // Log-Log interpolation, inverted in log space
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = std::log((x_i1 * p_i1) / (x_i * p_i)) / std::log(x_i1 / x_i);
    double f = (c - c_i) / (p_i * x_i);
    return x_i * std::exp(f * log1prel(m * f));
  } else {
    UNREACHABLE();
  }
}
602

603
double Tabular::evaluate(double x) const
100,000✔
604
{
605
  int i;
606

607
  if (interp_ == Interpolation::histogram) {
100,000!
608
    i = std::upper_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;
×
609
    if (i < 0 || i >= static_cast<int>(p_.size())) {
×
UNCOV
610
      return 0.0;
×
611
    } else {
UNCOV
612
      return p_[i];
×
613
    }
614
  } else {
615
    i = std::lower_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;
100,000✔
616

617
    if (i < 0 || i >= static_cast<int>(p_.size()) - 1) {
100,000!
618
      return 0.0;
×
619
    } else {
620
      double x0 = x_[i];
100,000✔
621
      double x1 = x_[i + 1];
100,000✔
622
      double p0 = p_[i];
100,000✔
623
      double p1 = p_[i + 1];
100,000✔
624

625
      double t = (x - x0) / (x1 - x0);
100,000✔
626
      return (1 - t) * p0 + t * p1;
100,000✔
627
    }
628
  }
629
}
630

631
//==============================================================================
632
// Equiprobable implementation
633
//==============================================================================
634

UNCOV
635
double Equiprobable::sample_unbiased(uint64_t* seed) const
×
636
{
UNCOV
637
  std::size_t n = x_.size();
×
638

639
  double r = prn(seed);
×
UNCOV
640
  int i = std::floor((n - 1) * r);
×
641

UNCOV
642
  double xl = x_[i];
×
UNCOV
643
  double xr = x_[i + i];
×
UNCOV
644
  return xl + ((n - 1) * r - i) * (xr - xl);
×
645
}
646

UNCOV
647
double Equiprobable::evaluate(double x) const
{
  // NOTE(review): this returns a flat density over [min, max], which is the
  // exact PDF only when the grid points are equally spaced; for a general
  // equiprobable-bin table the density differs per bin. Confirm this
  // approximation is intended before relying on it for biasing weights.
  double x_min = *std::min_element(x_.begin(), x_.end());
  double x_max = *std::max_element(x_.begin(), x_.end());

  if (x < x_min || x > x_max) {
    return 0.0;
  } else {
    return 1.0 / (x_max - x_min);
  }
}
658

659
//==============================================================================
660
// Mixture implementation
661
//==============================================================================
662

663
// Build a mixture of sub-distributions from <pair> children, each carrying a
// selection probability weighted by the sub-distribution's own integral. An
// optional <bias> array reweights component selection.
Mixture::Mixture(pugi::xml_node node)
{
  vector<double> probabilities;

  // First pass: collect distributions and their probabilities
  for (pugi::xml_node pair : node.children("pair")) {
    // Check that required data exists
    if (!pair.attribute("probability"))
      fatal_error("Mixture pair element does not have probability.");
    if (!pair.child("dist"))
      fatal_error("Mixture pair element does not have a distribution.");

    // Get probability and distribution
    double p = std::stod(pair.attribute("probability").value());
    auto dist = distribution_from_xml(pair.child("dist"));

    // Weight probability by the distribution's integral so unnormalized
    // sub-distributions contribute in proportion to their total strength
    double weighted_prob = p * dist->integral();
    probabilities.push_back(weighted_prob);
    distribution_.push_back(std::move(dist));
  }

  // Save sum of weighted probabilities
  integral_ = std::accumulate(probabilities.begin(), probabilities.end(), 0.0);

  std::size_t n = probabilities.size();

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities; the array must have one entry per component
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Mixture distribution with " +
        std::to_string(n) + " components using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights
    weight_ = compute_importance_weights(probabilities, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty
    di_.assign(probabilities);
  }
}
712

713
std::pair<double, double> Mixture::sample(uint64_t* seed) const
203,240✔
714
{
715
  size_t idx = di_.sample(seed);
203,240✔
716

717
  // Sample the chosen distribution
718
  auto [val, sub_wgt] = distribution_[idx]->sample(seed);
203,240✔
719

720
  // Multiply by component selection weight
721
  double mix_wgt = weight_.empty() ? 1.0 : weight_[idx];
203,240!
722
  return {val, mix_wgt * sub_wgt};
203,240✔
723
}
724

UNCOV
725
double Mixture::sample_unbiased(uint64_t* seed) const
×
726
{
UNCOV
727
  size_t idx = di_.sample(seed);
×
UNCOV
728
  return distribution_[idx]->sample(seed).first;
×
729
}
730

731
//==============================================================================
732
// Helper function
733
//==============================================================================
734

735
//! Construct the concrete Distribution subclass named by the node's "type"
//! attribute. Unknown or retired types are fatal errors.
UPtrDist distribution_from_xml(pugi::xml_node node)
{
  if (!check_for_node(node, "type"))
    openmc::fatal_error("Distribution type must be specified.");

  // Determine type of distribution
  std::string type = get_node_value(node, "type", true, true);

  // Dispatch to the matching concrete distribution
  if (type == "uniform") {
    return UPtrDist {new Uniform(node)};
  } else if (type == "powerlaw") {
    return UPtrDist {new PowerLaw(node)};
  } else if (type == "maxwell") {
    return UPtrDist {new Maxwell(node)};
  } else if (type == "watt") {
    return UPtrDist {new Watt(node)};
  } else if (type == "normal") {
    return UPtrDist {new Normal(node)};
  } else if (type == "discrete") {
    return UPtrDist {new Discrete(node)};
  } else if (type == "tabular") {
    return UPtrDist {new Tabular(node)};
  } else if (type == "mixture") {
    return UPtrDist {new Mixture(node)};
  } else if (type == "muir") {
    openmc::fatal_error(
      "'muir' distributions are now specified using the openmc.stats.muir() "
      "function in Python. Please regenerate your XML files.");
  } else {
    openmc::fatal_error("Invalid distribution type: " + type);
  }
  // Reached only if fatal_error returns; mirrors the original's empty result
  return nullptr;
}
770

771
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc