• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 21979935674

13 Feb 2026 08:26AM UTC coverage: 81.457% (-0.4%) from 81.851%
21979935674

Pull #3801

github

web-flow
Merge cee3353e4 into bcb939520
Pull Request #3801: avoid need to set particles and batches for dagmc models when calling convert_to_multigroup

16961 of 23617 branches covered (71.82%)

Branch coverage included in aggregate %.

0 of 1 new or added line in 1 file covered. (0.0%)

908 existing lines in 38 files now uncovered.

55977 of 65925 relevant lines covered (84.91%)

39154534.73 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

72.28
/src/distribution.cpp
1
#include "openmc/distribution.h"
2

3
#include <algorithm> // for copy
4
#include <array>
5
#include <cmath>     // for sqrt, floor, max
6
#include <iterator>  // for back_inserter
7
#include <numeric>   // for accumulate
8
#include <stdexcept> // for runtime_error
9
#include <string>    // for string, stod
10

11
#include "openmc/constants.h"
12
#include "openmc/error.h"
13
#include "openmc/math_functions.h"
14
#include "openmc/random_dist.h"
15
#include "openmc/random_lcg.h"
16
#include "openmc/xml_interface.h"
17

18
namespace openmc {
19

20
//==============================================================================
21
// Helper function for computing importance weights from biased sampling
22
//==============================================================================
23

24
// Compute per-entry importance weights w_i = p_i / b_i after normalizing
// both the true probabilities p and the bias probabilities b to sum to one.
// An entry whose normalized bias probability is zero gets an infinite
// weight (it can never be sampled from the bias distribution).
vector<double> compute_importance_weights(
  const vector<double>& p, const vector<double>& b)
{
  const std::size_t n = p.size();

  // Totals used to normalize each distribution
  const double sum_p = std::accumulate(p.begin(), p.end(), 0.0);
  const double sum_b = std::accumulate(b.begin(), b.end(), 0.0);

  // Form the ratio of normalized probabilities entry by entry
  vector<double> weights(n);
  for (std::size_t i = 0; i < n; ++i) {
    const double p_norm = p[i] / sum_p;
    const double b_norm = b[i] / sum_b;
    weights[i] = (b_norm == 0.0) ? INFTY : p_norm / b_norm;
  }
  return weights;
}
50

51
std::pair<double, double> Distribution::sample(uint64_t* seed) const
622,589,545✔
52
{
53
  if (bias_) {
622,589,545✔
54
    // Sample from the bias distribution and compute importance weight
55
    double val = bias_->sample_unbiased(seed);
519,977✔
56
    double wgt = this->evaluate(val) / bias_->evaluate(val);
519,977✔
57
    return {val, wgt};
519,977✔
58
  } else {
59
    // Unbiased sampling: return sampled value with weight 1.0
60
    double val = sample_unbiased(seed);
622,069,568✔
61
    return {val, 1.0};
622,069,568✔
62
  }
63
}
64

65
// PDF evaluation not supported for all distribution types
//
// Default implementation; subclasses that support PDF evaluation (needed
// when a distribution is biased or acts as a bias) override this method.
// Always throws for distribution types without an implemented PDF.
double Distribution::evaluate(double x) const
{
  throw std::runtime_error(
    "PDF evaluation not implemented for this distribution type.");
}
71

72
// Read an optional <bias> child from the given XML node and, if present,
// install the resulting distribution as this distribution's bias. It is a
// fatal error for a bias distribution to carry its own nested bias.
void Distribution::read_bias_from_xml(pugi::xml_node node)
{
  // Nothing to do when the node has no <bias> child
  if (!check_for_node(node, "bias"))
    return;

  pugi::xml_node bias_node = node.child("bias");

  // Nested biasing is not allowed
  if (check_for_node(bias_node, "bias")) {
    openmc::fatal_error(
      "Distribution has a bias distribution with its own bias distribution. "
      "Please ensure bias distributions do not have their own bias.");
  }

  set_bias(distribution_from_xml(bias_node));
}
87

88
//==============================================================================
89
// DiscreteIndex implementation
90
//==============================================================================
91

92
// Construct from an XML node whose <parameters> element holds n x-values
// followed by n probabilities; only the probability half is used here.
DiscreteIndex::DiscreteIndex(pugi::xml_node node)
{
  const auto params = get_node_array<double>(node, "parameters");
  const std::size_t half = params.size() / 2;

  // Hand the second half (the probabilities) to assign()
  assign({params.data() + half, half});
}
99

100
// Construct directly from a list of (possibly unnormalized) probabilities
DiscreteIndex::DiscreteIndex(span<const double> p)
{
  assign(p);
}
104

105
// Replace the stored probabilities with p and rebuild the alias tables
// used for O(1) sampling
void DiscreteIndex::assign(span<const double> p)
{
  prob_.assign(p.begin(), p.end());
  this->init_alias();
}
110

111
// Build the alias table enabling O(1) sampling of the discrete distribution
void DiscreteIndex::init_alias()
{
  normalize();

  // The initialization and sampling method is based on Vose
  // (DOI: 10.1109/32.92917)
  // Vectors for large and small probabilities based on 1/n
  vector<size_t> large;
  vector<size_t> small;

  size_t n = prob_.size();

  // Set and allocate memory
  alias_.assign(n, 0);

  // Fill large and small vectors based on 1/n
  // Scaling each probability by n makes the average exactly 1; entries
  // above 1 will donate excess mass to entries below 1 via aliases
  for (size_t i = 0; i < n; i++) {
    prob_[i] *= n;
    if (prob_[i] > 1.0) {
      large.push_back(i);
    } else {
      small.push_back(i);
    }
  }

  while (!large.empty() && !small.empty()) {
    int j = small.back();
    int k = large.back();

    // Remove last element of small
    small.pop_back();

    // Update probability and alias based on Vose's algorithm: column j keeps
    // prob_[j] of its own mass and the remainder (1 - prob_[j]) comes from
    // the large column k, which becomes j's alias
    prob_[k] += prob_[j] - 1.0;
    alias_[j] = k;

    // Move large index to small vector, if it is no longer large
    if (prob_[k] < 1.0) {
      small.push_back(k);
      large.pop_back();
    }
  }
  // Columns remaining in either list have prob_ ~= 1 and need no alias
}
154

155
size_t DiscreteIndex::sample(uint64_t* seed) const
50,461,503✔
156
{
157
  // Alias sampling of discrete distribution
158
  size_t n = prob_.size();
50,461,503✔
159
  if (n > 1) {
50,461,503✔
160
    size_t u = prn(seed) * n;
12,876,461✔
161
    if (prn(seed) < prob_[u]) {
12,876,461✔
162
      return u;
7,636,760✔
163
    } else {
164
      return alias_[u];
5,239,701✔
165
    }
166
  } else {
167
    return 0;
37,585,042✔
168
  }
169
}
170

171
void DiscreteIndex::normalize()
58,347✔
172
{
173
  // Renormalize density function so that it sums to unity. Note that we save
174
  // the integral of the distribution so that if it is used as part of another
175
  // distribution (e.g., Mixture), we know its relative strength.
176
  integral_ = std::accumulate(prob_.begin(), prob_.end(), 0.0);
58,347✔
177
  for (auto& p_i : prob_) {
1,175,976✔
178
    p_i /= integral_;
1,117,629✔
179
  }
180
}
58,347✔
181

182
//==============================================================================
183
// Discrete implementation
184
//==============================================================================
185

186
// Read a discrete distribution (and an optional bias) from an XML node.
// <parameters> contains n x-values followed by n probabilities.
Discrete::Discrete(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;

  // First half is x values, second half is probabilities
  x_.assign(params.begin(), params.begin() + n);
  const double* p = params.data() + n;

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities; must match the number of true probabilities
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Discrete distribution with " +
        std::to_string(n) + " probability entries using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights p_i/b_i used to correct biased samples
    vector<double> p_vec(p, p + n);
    weight_ = compute_importance_weights(p_vec, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty
    di_.assign({p, n});
  }
}
218

219
// Construct from parallel arrays of n values (x) and probabilities (p)
Discrete::Discrete(const double* x, const double* p, size_t n) : di_({p, n})
{
  x_.assign(x, x + n);
}
223

224
std::pair<double, double> Discrete::sample(uint64_t* seed) const
45,811,602✔
225
{
226
  size_t idx = di_.sample(seed);
45,811,602✔
227
  double wgt = weight_.empty() ? 1.0 : weight_[idx];
45,811,602✔
228
  return {x_[idx], wgt};
45,811,602✔
229
}
230

231
double Discrete::sample_unbiased(uint64_t* seed) const
×
232
{
233
  size_t idx = di_.sample(seed);
×
234
  return x_[idx];
×
235
}
236

237
//==============================================================================
238
// Uniform implementation
239
//==============================================================================
240

241
// Read a uniform distribution from an XML node; <parameters> must hold
// exactly the lower bound a and the upper bound b.
Uniform::Uniform(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2) {
    fatal_error("Uniform distribution must have two "
                "parameters specified.");
  }

  a_ = params[0];
  b_ = params[1];

  // Optional <bias> child
  read_bias_from_xml(node);
}
254

255
double Uniform::sample_unbiased(uint64_t* seed) const
537,023✔
256
{
257
  return a_ + prn(seed) * (b_ - a_);
537,023✔
258
}
259

260
double Uniform::evaluate(double x) const
279,977✔
261
{
262
  if (x <= a()) {
279,977!
263
    return 0.0;
×
264
  } else if (x >= b()) {
279,977!
265
    return 0.0;
×
266
  } else {
267
    return 1 / (b() - a());
279,977✔
268
  }
269
}
270

271
//==============================================================================
272
// PowerLaw implementation
273
//==============================================================================
274

275
// Read a power-law distribution from an XML node; <parameters> must hold
// the lower bound a, upper bound b, and exponent n.
PowerLaw::PowerLaw(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 3) {
    fatal_error("PowerLaw distribution must have three "
                "parameters specified.");
  }

  const double lower = params.at(0);
  const double upper = params.at(1);
  const double exponent = params.at(2);

  // Cache the quantities needed for inverse-CDF sampling:
  // a^(n+1), the span b^(n+1) - a^(n+1), and 1/(n+1)
  offset_ = std::pow(lower, exponent + 1);
  span_ = std::pow(upper, exponent + 1) - offset_;
  ninv_ = 1 / (exponent + 1);

  // Optional <bias> child
  read_bias_from_xml(node);
}
293

294
double PowerLaw::evaluate(double x) const
199,977✔
295
{
296
  if (x <= a()) {
199,977!
297
    return 0.0;
×
298
  } else if (x >= b()) {
199,977!
299
    return 0.0;
×
300
  } else {
301
    int pwr = n() + 1;
199,977✔
302
    double norm = pwr / span_;
199,977✔
303
    return norm * std::pow(std::fabs(x), n());
199,977✔
304
  }
305
}
306

307
double PowerLaw::sample_unbiased(uint64_t* seed) const
201,593✔
308
{
309
  return std::pow(offset_ + prn(seed) * span_, ninv_);
201,593✔
310
}
311

312
//==============================================================================
313
// Maxwell implementation
314
//==============================================================================
315

316
// Read a Maxwellian spectrum from an XML node; the single parameter is the
// temperature theta.
Maxwell::Maxwell(pugi::xml_node node)
{
  theta_ = std::stod(get_node_value(node, "parameters"));

  // Optional <bias> child
  read_bias_from_xml(node);
}
322

323
// Sample an energy from a Maxwellian spectrum with temperature theta_
double Maxwell::sample_unbiased(uint64_t* seed) const
{
  return maxwell_spectrum(theta_, seed);
}
327

328
double Maxwell::evaluate(double x) const
160,000✔
329
{
330
  double c = (2.0 / SQRT_PI) * std::pow(theta_, -1.5);
160,000✔
331
  return c * std::sqrt(x) * std::exp(-x / theta_);
160,000✔
332
}
333

334
//==============================================================================
335
// Watt implementation
336
//==============================================================================
337

338
// Read a Watt fission spectrum from an XML node; <parameters> must hold the
// two spectrum parameters a and b.
Watt::Watt(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2)
    openmc::fatal_error("Watt energy distribution must have two "
                        "parameters specified.");

  a_ = params[0];
  b_ = params[1];

  // Optional <bias> child
  read_bias_from_xml(node);
}
350

351
// Sample an energy from a Watt fission spectrum with parameters a_ and b_
double Watt::sample_unbiased(uint64_t* seed) const
{
  return watt_spectrum(a_, b_, seed);
}
355

356
double Watt::evaluate(double x) const
160,000✔
357
{
358
  double c =
359
    2.0 / (std::sqrt(PI * b_) * std::pow(a_, 1.5) * std::exp(a_ * b_ / 4.0));
160,000✔
360
  return c * std::exp(-x / a_) * std::sinh(std::sqrt(b_ * x));
160,000✔
361
}
362

363
//==============================================================================
364
// Normal implementation
365
//==============================================================================
366

367
// Construct a (possibly truncated) normal distribution directly from its
// mean, standard deviation, and truncation bounds
Normal::Normal(double mean_value, double std_dev, double lower, double upper)
  : mean_value_ {mean_value}, std_dev_ {std_dev}, lower_ {lower}, upper_ {upper}
{
  compute_normalization();
}
372

373
// Read a normal distribution from an XML node. <parameters> holds either
// (mean, std_dev) or (mean, std_dev, lower, upper) for a truncated normal.
Normal::Normal(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  const bool has_bounds = (params.size() == 4);
  if (params.size() != 2 && !has_bounds) {
    openmc::fatal_error("Normal energy distribution must have two "
                        "parameters (mean, std_dev) or four parameters "
                        "(mean, std_dev, lower, upper) specified.");
  }

  mean_value_ = params.at(0);
  std_dev_ = params.at(1);

  // Truncation bounds default to +/- infinity when not provided
  lower_ = has_bounds ? params.at(2) : -INFTY;
  upper_ = has_bounds ? params.at(3) : INFTY;

  compute_normalization();
  read_bias_from_xml(node);
}
397

398
// Validate the truncation bounds and precompute the factor that rescales
// the Gaussian PDF over the truncated interval; called from every ctor.
void Normal::compute_normalization()
{
  // Validate bounds
  if (lower_ >= upper_) {
    openmc::fatal_error(
      "Normal distribution lower bound must be less than upper bound.");
  }

  // Check if truncation bounds are finite
  is_truncated_ = (lower_ > -INFTY || upper_ < INFTY);

  if (is_truncated_) {
    // Fraction of the untruncated distribution lying inside [lower_, upper_]
    double alpha = (lower_ - mean_value_) / std_dev_;
    double beta = (upper_ - mean_value_) / std_dev_;
    double cdf_diff = standard_normal_cdf(beta) - standard_normal_cdf(alpha);

    if (cdf_diff <= 0.0) {
      openmc::fatal_error(
        "Normal distribution truncation bounds exclude entire distribution.");
    }
    // PDF inside the bounds is scaled up by 1/(CDF(beta) - CDF(alpha))
    norm_factor_ = 1.0 / cdf_diff;
  } else {
    norm_factor_ = 1.0;
  }
}
423

424
double Normal::sample_unbiased(uint64_t* seed) const
240,000✔
425
{
426
  if (!is_truncated_) {
240,000✔
427
    return normal_variate(mean_value_, std_dev_, seed);
160,000✔
428
  }
429

430
  // Rejection sampling for truncated normal
431
  double x;
432
  do {
433
    x = normal_variate(mean_value_, std_dev_, seed);
117,272✔
434
  } while (x < lower_ || x > upper_);
117,272✔
435
  return x;
80,000✔
436
}
437

438
double Normal::evaluate(double x) const
160,088✔
439
{
440
  // Return 0 outside truncation bounds
441
  if (x < lower_ || x > upper_) {
160,088✔
442
    return 0.0;
32✔
443
  }
444

445
  // Standard normal PDF value
446
  double pdf = (1.0 / (std::sqrt(2.0 * PI) * std_dev_)) *
160,056✔
447
               std::exp(-std::pow((x - mean_value_), 2.0) /
160,056✔
448
                        (2.0 * std::pow(std_dev_, 2.0)));
160,056✔
449

450
  // Apply normalization for truncation
451
  return pdf * norm_factor_;
160,056✔
452
}
453

454
//==============================================================================
455
// Tabular implementation
456
//==============================================================================
457

458
// Read a tabular distribution from an XML node: an optional interpolation
// scheme, the x/p table in <parameters>, and an optional bias.
Tabular::Tabular(pugi::xml_node node)
{
  if (check_for_node(node, "interpolation")) {
    std::string temp = get_node_value(node, "interpolation");
    if (temp == "histogram") {
      interp_ = Interpolation::histogram;
    } else if (temp == "linear-linear") {
      interp_ = Interpolation::lin_lin;
    } else if (temp == "log-linear") {
      interp_ = Interpolation::log_lin;
    } else if (temp == "log-log") {
      interp_ = Interpolation::log_log;
    } else {
      openmc::fatal_error(
        "Unsupported interpolation type for distribution: " + temp);
    }
  } else {
    // Default when no <interpolation> element is present
    interp_ = Interpolation::histogram;
  }

  // Read and initialize tabular distribution. If number of parameters is odd,
  // add an extra zero for the 'p' array.
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() % 2 != 0) {
    params.push_back(0.0);
  }
  std::size_t n = params.size() / 2;
  const double* x = params.data();
  const double* p = x + n;
  init(x, p, n);

  read_bias_from_xml(node);
}
491

492
// Construct from raw arrays: n x-values, the PDF p, and an optional
// precomputed CDF c (computed internally by init() when null)
Tabular::Tabular(const double* x, const double* p, int n, Interpolation interp,
  const double* c)
  : interp_ {interp}
{
  init(x, p, n, c);
}
498

499
// Copy the x/p table, build (or copy) the CDF according to the chosen
// interpolation scheme, and normalize so the PDF integrates to one. The
// pre-normalization integral is saved in integral_ for use by Mixture.
void Tabular::init(
  const double* x, const double* p, std::size_t n, const double* c)
{
  // Copy x/p arrays into vectors
  std::copy(x, x + n, std::back_inserter(x_));
  std::copy(p, p + n, std::back_inserter(p_));

  // Calculate cumulative distribution function
  if (c) {
    // Caller supplied a precomputed CDF — use it directly
    std::copy(c, c + n, std::back_inserter(c_));
  } else {
    c_.resize(n);
    c_[0] = 0.0;
    for (int i = 1; i < n; ++i) {
      if (interp_ == Interpolation::histogram) {
        // Rectangle rule: p is constant across each bin
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::lin_lin) {
        // Trapezoid rule for linearly interpolated p
        c_[i] = c_[i - 1] + 0.5 * (p_[i - 1] + p_[i]) * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::log_lin) {
        // Integral of p(x) varying exponentially across the bin; exprel
        // keeps the expression stable as the slope m -> 0
        double m = std::log(p_[i] / p_[i - 1]) / (x_[i] - x_[i - 1]);
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]) *
                              exprel(m * (x_[i] - x_[i - 1]));
      } else if (interp_ == Interpolation::log_log) {
        // Integral of p(x) varying as a power law across the bin
        double m = std::log((x_[i] * p_[i]) / (x_[i - 1] * p_[i - 1])) /
                   std::log(x_[i] / x_[i - 1]);
        c_[i] = c_[i - 1] + x_[i - 1] * p_[i - 1] *
                              std::log(x_[i] / x_[i - 1]) *
                              exprel(m * std::log(x_[i] / x_[i - 1]));
      } else {
        UNREACHABLE();
      }
    }
  }

  // Normalize density and distribution functions. Note that we save the
  // integral of the distribution so that if it is used as part of another
  // distribution (e.g., Mixture), we know its relative strength.
  integral_ = c_[n - 1];
  for (int i = 0; i < n; ++i) {
    p_[i] = p_[i] / integral_;
    c_[i] = c_[i] / integral_;
  }
}
542

543
// Sample a value by inverting the tabulated CDF, using the interpolation
// scheme fixed at construction time
double Tabular::sample_unbiased(uint64_t* seed) const
{
  // Sample value of CDF
  double c = prn(seed);

  // Find first CDF bin which is above the sampled value
  double c_i = c_[0];
  int i;
  std::size_t n = c_.size();
  for (i = 0; i < n - 1; ++i) {
    if (c <= c_[i + 1])
      break;
    c_i = c_[i + 1];
  }

  // Determine bounding PDF values
  double x_i = x_[i];
  double p_i = p_[i];

  if (interp_ == Interpolation::histogram) {
    // Histogram interpolation
    if (p_i > 0.0) {
      // Linear inversion within the bin: x = x_i + (c - c_i)/p_i
      return x_i + (c - c_i) / p_i;
    } else {
      // Zero-probability bin: return its left edge
      return x_i;
    }
  } else if (interp_ == Interpolation::lin_lin) {
    // Linear-linear interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = (p_i1 - p_i) / (x_i1 - x_i);
    if (m == 0.0) {
      // Flat PDF across the bin: same inversion as the histogram case
      return x_i + (c - c_i) / p_i;
    } else {
      // Quadratic CDF inversion; the max(0, ...) guards against a slightly
      // negative discriminant caused by roundoff
      return x_i +
             (std::sqrt(std::max(0.0, p_i * p_i + 2 * m * (c - c_i))) - p_i) /
               m;
    }
  } else if (interp_ == Interpolation::log_lin) {
    // Log-linear interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    // log1prel keeps the inversion stable as the slope m -> 0
    double m = std::log(p_i1 / p_i) / (x_i1 - x_i);
    double f = (c - c_i) / p_i;
    return x_i + f * log1prel(m * f);
  } else if (interp_ == Interpolation::log_log) {
    // Log-Log interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = std::log((x_i1 * p_i1) / (x_i * p_i)) / std::log(x_i1 / x_i);
    double f = (c - c_i) / (p_i * x_i);
    return x_i * std::exp(f * log1prel(m * f));
  } else {
    UNREACHABLE();
  }
}
602

603
// Evaluate the normalized PDF at x: piecewise-constant for histogram
// interpolation, linear interpolation between grid points otherwise
double Tabular::evaluate(double x) const
{
  int i;

  if (interp_ == Interpolation::histogram) {
    // Index of the bin containing x (last grid point <= x)
    i = std::upper_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;
    if (i < 0 || i >= static_cast<int>(p_.size())) {
      return 0.0;
    } else {
      return p_[i];
    }
  } else {
    i = std::lower_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;

    // NOTE(review): for x exactly equal to x_[0], lower_bound yields i == -1
    // so this returns 0 rather than p_[0] — confirm this edge is intended
    if (i < 0 || i >= static_cast<int>(p_.size()) - 1) {
      return 0.0;
    } else {
      // Linear interpolation between the bracketing grid points
      double x0 = x_[i];
      double x1 = x_[i + 1];
      double p0 = p_[i];
      double p1 = p_[i + 1];

      double t = (x - x0) / (x1 - x0);
      return (1 - t) * p0 + t * p1;
    }
  }
}
630

631
//==============================================================================
632
// Equiprobable implementation
633
//==============================================================================
634

635
double Equiprobable::sample_unbiased(uint64_t* seed) const
×
636
{
UNCOV
637
  std::size_t n = x_.size();
×
638

UNCOV
639
  double r = prn(seed);
×
UNCOV
640
  int i = std::floor((n - 1) * r);
×
641

UNCOV
642
  double xl = x_[i];
×
UNCOV
643
  double xr = x_[i + i];
×
644
  return xl + ((n - 1) * r - i) * (xr - xl);
×
645
}
646

UNCOV
647
double Equiprobable::evaluate(double x) const
×
648
{
UNCOV
649
  double x_min = *std::min_element(x_.begin(), x_.end());
×
UNCOV
650
  double x_max = *std::max_element(x_.begin(), x_.end());
×
651

UNCOV
652
  if (x < x_min || x > x_max) {
×
UNCOV
653
    return 0.0;
×
654
  } else {
UNCOV
655
    return 1.0 / (x_max - x_min);
×
656
  }
657
}
658

659
//==============================================================================
660
// Mixture implementation
661
//==============================================================================
662

663
// Read a mixture distribution from XML: a list of <pair> children, each
// carrying a probability attribute and a nested <dist>, plus an optional
// <bias> over the mixture components.
Mixture::Mixture(pugi::xml_node node)
{
  vector<double> probabilities;

  // First pass: collect distributions and their probabilities
  for (pugi::xml_node pair : node.children("pair")) {
    // Check that required data exists
    if (!pair.attribute("probability"))
      fatal_error("Mixture pair element does not have probability.");
    if (!pair.child("dist"))
      fatal_error("Mixture pair element does not have a distribution.");

    // Get probability and distribution
    double p = std::stod(pair.attribute("probability").value());
    auto dist = distribution_from_xml(pair.child("dist"));

    // Weight probability by the distribution's integral
    double weighted_prob = p * dist->integral();
    probabilities.push_back(weighted_prob);
    distribution_.push_back(std::move(dist));
  }

  // Save sum of weighted probabilities
  integral_ = std::accumulate(probabilities.begin(), probabilities.end(), 0.0);

  std::size_t n = probabilities.size();

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities; must match the number of components
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Mixture distribution with " +
        std::to_string(n) + " components using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights
    weight_ = compute_importance_weights(probabilities, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty
    di_.assign(probabilities);
  }
}
712

713
std::pair<double, double> Mixture::sample(uint64_t* seed) const
162,592✔
714
{
715
  size_t idx = di_.sample(seed);
162,592✔
716

717
  // Sample the chosen distribution
718
  auto [val, sub_wgt] = distribution_[idx]->sample(seed);
162,592✔
719

720
  // Multiply by component selection weight
721
  double mix_wgt = weight_.empty() ? 1.0 : weight_[idx];
162,592!
722
  return {val, mix_wgt * sub_wgt};
162,592✔
723
}
724

UNCOV
725
double Mixture::sample_unbiased(uint64_t* seed) const
×
726
{
UNCOV
727
  size_t idx = di_.sample(seed);
×
UNCOV
728
  return distribution_[idx]->sample(seed).first;
×
729
}
730

731
//==============================================================================
732
// Helper function
733
//==============================================================================
734

735
// Factory: construct the concrete Distribution subclass named by the node's
// "type" value and return it as an owning pointer
UPtrDist distribution_from_xml(pugi::xml_node node)
{
  if (!check_for_node(node, "type"))
    openmc::fatal_error("Distribution type must be specified.");

  // Determine type of distribution
  std::string type = get_node_value(node, "type", true, true);

  // Allocate extension of Distribution
  UPtrDist dist;
  if (type == "uniform") {
    dist = UPtrDist {new Uniform(node)};
  } else if (type == "powerlaw") {
    dist = UPtrDist {new PowerLaw(node)};
  } else if (type == "maxwell") {
    dist = UPtrDist {new Maxwell(node)};
  } else if (type == "watt") {
    dist = UPtrDist {new Watt(node)};
  } else if (type == "normal") {
    dist = UPtrDist {new Normal(node)};
  } else if (type == "discrete") {
    dist = UPtrDist {new Discrete(node)};
  } else if (type == "tabular") {
    dist = UPtrDist {new Tabular(node)};
  } else if (type == "mixture") {
    dist = UPtrDist {new Mixture(node)};
  } else if (type == "muir") {
    // Removed distribution type: direct users to the Python replacement
    openmc::fatal_error(
      "'muir' distributions are now specified using the openmc.stats.muir() "
      "function in Python. Please regenerate your XML files.");
  } else {
    openmc::fatal_error("Invalid distribution type: " + type);
  }
  return dist;
}
770

771
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc