openmc-dev / openmc, build 21926592529
11 Feb 2026 11:06PM UTC coverage: 81.697% (-0.2%) from 81.905%

Pull Request #3789: SolidRayTracePlot CAPI
Merge 9044b337f into 96383fcb2 (committed via GitHub web-flow)

17496 of 24595 branches covered (71.14%); branch coverage is included in the aggregate %.
467 of 622 new or added lines in 2 files covered (75.08%).
217 existing lines in 5 files are now uncovered.
56853 of 66411 relevant lines covered (85.61%).
44164293.19 hits per line.

Source file: /src/distribution.cpp (70.48% covered)

#include "openmc/distribution.h"

#include <algorithm> // for copy
#include <array>
#include <cmath>     // for sqrt, floor, max
#include <iterator>  // for back_inserter
#include <numeric>   // for accumulate
#include <stdexcept> // for runtime_error
#include <string>    // for string, stod

#include "openmc/constants.h"
#include "openmc/error.h"
#include "openmc/math_functions.h"
#include "openmc/random_dist.h"
#include "openmc/random_lcg.h"
#include "openmc/xml_interface.h"

namespace openmc {

//==============================================================================
// Helper function for computing importance weights from biased sampling
//==============================================================================

vector<double> compute_importance_weights(
  const vector<double>& p, const vector<double>& b)
{
  std::size_t n = p.size();

  // Normalize original probabilities
  double sum_p = std::accumulate(p.begin(), p.end(), 0.0);
  vector<double> p_norm(n);
  for (std::size_t i = 0; i < n; ++i) {
    p_norm[i] = p[i] / sum_p;
  }

  // Normalize bias probabilities
  double sum_b = std::accumulate(b.begin(), b.end(), 0.0);
  vector<double> b_norm(n);
  for (std::size_t i = 0; i < n; ++i) {
    b_norm[i] = b[i] / sum_b;
  }

  // Compute importance weights
  vector<double> weights(n);
  for (std::size_t i = 0; i < n; ++i) {
    weights[i] = (b_norm[i] == 0.0) ? INFTY : p_norm[i] / b_norm[i];
  }
  return weights;
}
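
// Note on compute_importance_weights: each entry is the ratio of the
// normalized true probability to the normalized bias probability,
// weights[i] = (p[i] / sum(p)) / (b[i] / sum(b)). Sampling an index from the
// bias distribution and multiplying the particle weight by weights[i] leaves
// the expected contribution of every index unchanged, which keeps tallies
// unbiased.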

std::pair<double, double> Distribution::sample(uint64_t* seed) const
{
  if (bias_) {
    // Sample from the bias distribution and compute importance weight
    double val = bias_->sample_unbiased(seed);
    double wgt = this->evaluate(val) / bias_->evaluate(val);
    return {val, wgt};
  } else {
    // Unbiased sampling: return sampled value with weight 1.0
    double val = sample_unbiased(seed);
    return {val, 1.0};
  }
}
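
// For continuous distributions the importance weight is the pointwise PDF
// ratio evaluate(val) / bias_->evaluate(val) computed above, so any
// distribution used with biasing must override evaluate(); the base-class
// version below simply throws.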

// PDF evaluation not supported for all distribution types
double Distribution::evaluate(double x) const
{
  throw std::runtime_error(
    "PDF evaluation not implemented for this distribution type.");
}

void Distribution::read_bias_from_xml(pugi::xml_node node)
{
  if (check_for_node(node, "bias")) {
    pugi::xml_node bias_node = node.child("bias");

    if (check_for_node(bias_node, "bias")) {
      openmc::fatal_error(
        "Distribution has a bias distribution with its own bias distribution. "
        "Please ensure bias distributions do not have their own bias.");
    }

    UPtrDist bias = distribution_from_xml(bias_node);
    this->set_bias(std::move(bias));
  }
}

//==============================================================================
// DiscreteIndex implementation
//==============================================================================

DiscreteIndex::DiscreteIndex(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;

  assign({params.data() + n, n});
}

DiscreteIndex::DiscreteIndex(span<const double> p)
{
  assign(p);
}

void DiscreteIndex::assign(span<const double> p)
{
  prob_.assign(p.begin(), p.end());
  this->init_alias();
}

void DiscreteIndex::init_alias()
{
  normalize();

  // The initialization and sampling method is based on Vose
  // (DOI: 10.1109/32.92917)
  // Vectors for large and small probabilities based on 1/n
  vector<size_t> large;
  vector<size_t> small;

  size_t n = prob_.size();

  // Set and allocate memory
  alias_.assign(n, 0);

  // Fill large and small vectors based on 1/n
  for (size_t i = 0; i < n; i++) {
    prob_[i] *= n;
    if (prob_[i] > 1.0) {
      large.push_back(i);
    } else {
      small.push_back(i);
    }
  }

  while (!large.empty() && !small.empty()) {
    int j = small.back();
    int k = large.back();

    // Remove last element of small
    small.pop_back();

    // Update probability and alias based on Vose's algorithm
    prob_[k] += prob_[j] - 1.0;
    alias_[j] = k;

    // Move large index to small vector, if it is no longer large
    if (prob_[k] < 1.0) {
      small.push_back(k);
      large.pop_back();
    }
  }
}

size_t DiscreteIndex::sample(uint64_t* seed) const
{
  // Alias sampling of discrete distribution
  size_t n = prob_.size();
  if (n > 1) {
    size_t u = prn(seed) * n;
    if (prn(seed) < prob_[u]) {
      return u;
    } else {
      return alias_[u];
    }
  } else {
    return 0;
  }
}
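
// Worked example of the alias table: for p = {0.5, 0.3, 0.2}, scaling by n = 3
// gives prob_ = {1.5, 0.9, 0.6}; Vose's loop pairs the small columns 2 and 1
// with column 0, leaving prob_ = {1.0, 0.9, 0.6} and alias_ = {0, 0, 0}.
// Picking a uniform column u and accepting it with probability prob_[u]
// (otherwise returning alias_[u]) reproduces P(0) = (1.0 + 0.1 + 0.4) / 3 =
// 0.5, P(1) = 0.3, P(2) = 0.2 with O(1) work per sample.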

void DiscreteIndex::normalize()
{
  // Renormalize density function so that it sums to unity. Note that we save
  // the integral of the distribution so that if it is used as part of another
  // distribution (e.g., Mixture), we know its relative strength.
  integral_ = std::accumulate(prob_.begin(), prob_.end(), 0.0);
  for (auto& p_i : prob_) {
    p_i /= integral_;
  }
}

//==============================================================================
// Discrete implementation
//==============================================================================

Discrete::Discrete(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;

  // First half is x values, second half is probabilities
  x_.assign(params.begin(), params.begin() + n);
  const double* p = params.data() + n;

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Discrete distribution with " +
        std::to_string(n) + " probability entries using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights
    vector<double> p_vec(p, p + n);
    weight_ = compute_importance_weights(p_vec, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty
    di_.assign({p, n});
  }
}

Discrete::Discrete(const double* x, const double* p, size_t n) : di_({p, n})
{
  x_.assign(x, x + n);
}

std::pair<double, double> Discrete::sample(uint64_t* seed) const
{
  size_t idx = di_.sample(seed);
  double wgt = weight_.empty() ? 1.0 : weight_[idx];
  return {x_[idx], wgt};
}

double Discrete::sample_unbiased(uint64_t* seed) const
{
  size_t idx = di_.sample(seed);
  return x_[idx];
}

//==============================================================================
// Uniform implementation
//==============================================================================

Uniform::Uniform(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2) {
    fatal_error("Uniform distribution must have two "
                "parameters specified.");
  }

  a_ = params.at(0);
  b_ = params.at(1);

  read_bias_from_xml(node);
}

double Uniform::sample_unbiased(uint64_t* seed) const
{
  return a_ + prn(seed) * (b_ - a_);
}

double Uniform::evaluate(double x) const
{
  if (x <= a()) {
    return 0.0;
  } else if (x >= b()) {
    return 0.0;
  } else {
    return 1 / (b() - a());
  }
}

//==============================================================================
// PowerLaw implementation
//==============================================================================

PowerLaw::PowerLaw(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 3) {
    fatal_error("PowerLaw distribution must have three "
                "parameters specified.");
  }

  const double a = params.at(0);
  const double b = params.at(1);
  const double n = params.at(2);

  offset_ = std::pow(a, n + 1);
  span_ = std::pow(b, n + 1) - offset_;
  ninv_ = 1 / (n + 1);

  read_bias_from_xml(node);
}

double PowerLaw::evaluate(double x) const
{
  if (x <= a()) {
    return 0.0;
  } else if (x >= b()) {
    return 0.0;
  } else {
    double pwr = n() + 1;
    double norm = pwr / span_;
    return norm * std::pow(std::fabs(x), n());
  }
}

double PowerLaw::sample_unbiased(uint64_t* seed) const
{
  return std::pow(offset_ + prn(seed) * span_, ninv_);
}
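
// Sampling note: for p(x) proportional to x^n on [a, b] (n != -1), the CDF is
// (x^(n+1) - a^(n+1)) / (b^(n+1) - a^(n+1)), so inverting a uniform deviate xi
// gives x = (a^(n+1) + xi * (b^(n+1) - a^(n+1)))^(1/(n+1)), which is exactly
// std::pow(offset_ + prn(seed) * span_, ninv_) above.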

//==============================================================================
// Maxwell implementation
//==============================================================================

Maxwell::Maxwell(pugi::xml_node node)
{
  theta_ = std::stod(get_node_value(node, "parameters"));

  read_bias_from_xml(node);
}

double Maxwell::sample_unbiased(uint64_t* seed) const
{
  return maxwell_spectrum(theta_, seed);
}

double Maxwell::evaluate(double x) const
{
  double c = (2.0 / SQRT_PI) * std::pow(theta_, -1.5);
  return c * std::sqrt(x) * std::exp(-x / theta_);
}
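
// The density evaluated above is the Maxwellian spectrum
// p(E) = 2 * sqrt(E / PI) * theta^(-3/2) * exp(-E / theta), which integrates
// to one over [0, inf), so the biased-sampling weight ratio is well defined.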

//==============================================================================
// Watt implementation
//==============================================================================

Watt::Watt(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2)
    openmc::fatal_error("Watt energy distribution must have two "
                        "parameters specified.");

  a_ = params.at(0);
  b_ = params.at(1);

  read_bias_from_xml(node);
}

double Watt::sample_unbiased(uint64_t* seed) const
{
  return watt_spectrum(a_, b_, seed);
}

double Watt::evaluate(double x) const
{
  double c =
    2.0 / (std::sqrt(PI * b_) * std::pow(a_, 1.5) * std::exp(a_ * b_ / 4.0));
  return c * std::exp(-x / a_) * std::sinh(std::sqrt(b_ * x));
}
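
// Normalization of Watt::evaluate: the constant c above makes the density
// integrate to one, using
// integral_0^inf exp(-E/a) * sinh(sqrt(b*E)) dE = sqrt(PI*a^3*b)/2 * exp(a*b/4).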

//==============================================================================
// Normal implementation
//==============================================================================
Normal::Normal(pugi::xml_node node)
{
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2) {
    openmc::fatal_error("Normal energy distribution must have two "
                        "parameters specified.");
  }

  mean_value_ = params.at(0);
  std_dev_ = params.at(1);

  read_bias_from_xml(node);
}

double Normal::sample_unbiased(uint64_t* seed) const
{
  return normal_variate(mean_value_, std_dev_, seed);
}

double Normal::evaluate(double x) const
{
  return (1.0 / (std::sqrt(2.0 * PI) * std_dev_)) *
         std::exp(-(std::pow((x - mean_value_), 2.0)) /
                  (2.0 * std::pow(std_dev_, 2.0)));
}
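
// This is the standard Gaussian density,
// 1 / (std_dev_ * sqrt(2*PI)) * exp(-(x - mean_value_)^2 / (2 * std_dev_^2)).
// It is strictly positive for every finite x, so a Normal used as a bias
// distribution never produces a zero denominator in the weight ratio.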

//==============================================================================
// Tabular implementation
//==============================================================================

Tabular::Tabular(pugi::xml_node node)
{
  if (check_for_node(node, "interpolation")) {
    std::string temp = get_node_value(node, "interpolation");
    if (temp == "histogram") {
      interp_ = Interpolation::histogram;
    } else if (temp == "linear-linear") {
      interp_ = Interpolation::lin_lin;
    } else if (temp == "log-linear") {
      interp_ = Interpolation::log_lin;
    } else if (temp == "log-log") {
      interp_ = Interpolation::log_log;
    } else {
      openmc::fatal_error(
        "Unsupported interpolation type for distribution: " + temp);
    }
  } else {
    interp_ = Interpolation::histogram;
  }

  // Read and initialize tabular distribution. If number of parameters is odd,
  // add an extra zero for the 'p' array.
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() % 2 != 0) {
    params.push_back(0.0);
  }
  std::size_t n = params.size() / 2;
  const double* x = params.data();
  const double* p = x + n;
  init(x, p, n);

  read_bias_from_xml(node);
}

Tabular::Tabular(const double* x, const double* p, int n, Interpolation interp,
  const double* c)
  : interp_ {interp}
{
  init(x, p, n, c);
}

void Tabular::init(
  const double* x, const double* p, std::size_t n, const double* c)
{
  // Copy x/p arrays into vectors
  std::copy(x, x + n, std::back_inserter(x_));
  std::copy(p, p + n, std::back_inserter(p_));

  // Calculate cumulative distribution function
  if (c) {
    std::copy(c, c + n, std::back_inserter(c_));
  } else {
    c_.resize(n);
    c_[0] = 0.0;
    for (int i = 1; i < n; ++i) {
      if (interp_ == Interpolation::histogram) {
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::lin_lin) {
        c_[i] = c_[i - 1] + 0.5 * (p_[i - 1] + p_[i]) * (x_[i] - x_[i - 1]);
      } else if (interp_ == Interpolation::log_lin) {
        double m = std::log(p_[i] / p_[i - 1]) / (x_[i] - x_[i - 1]);
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]) *
                              exprel(m * (x_[i] - x_[i - 1]));
      } else if (interp_ == Interpolation::log_log) {
        double m = std::log((x_[i] * p_[i]) / (x_[i - 1] * p_[i - 1])) /
                   std::log(x_[i] / x_[i - 1]);
        c_[i] = c_[i - 1] + x_[i - 1] * p_[i - 1] *
                              std::log(x_[i] / x_[i - 1]) *
                              exprel(m * std::log(x_[i] / x_[i - 1]));
      } else {
        UNREACHABLE();
      }
    }
  }

  // Normalize density and distribution functions. Note that we save the
  // integral of the distribution so that if it is used as part of another
  // distribution (e.g., Mixture), we know its relative strength.
  integral_ = c_[n - 1];
  for (int i = 0; i < n; ++i) {
    p_[i] = p_[i] / integral_;
    c_[i] = c_[i] / integral_;
  }
}
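
// CDF construction notes: the lin_lin increment is the trapezoid rule, while
// the log_lin and log_log branches integrate an exponential/power segment in
// closed form. Writing the result in terms of exprel (presumably
// exprel(t) = (exp(t) - 1) / t) keeps the expression well behaved as the
// local slope m approaches zero.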

double Tabular::sample_unbiased(uint64_t* seed) const
{
  // Sample value of CDF
  double c = prn(seed);

  // Find first CDF bin which is above the sampled value
  double c_i = c_[0];
  int i;
  std::size_t n = c_.size();
  for (i = 0; i < n - 1; ++i) {
    if (c <= c_[i + 1])
      break;
    c_i = c_[i + 1];
  }

  // Determine bounding PDF values
  double x_i = x_[i];
  double p_i = p_[i];

  if (interp_ == Interpolation::histogram) {
    // Histogram interpolation
    if (p_i > 0.0) {
      return x_i + (c - c_i) / p_i;
    } else {
      return x_i;
    }
  } else if (interp_ == Interpolation::lin_lin) {
    // Linear-linear interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = (p_i1 - p_i) / (x_i1 - x_i);
    if (m == 0.0) {
      return x_i + (c - c_i) / p_i;
    } else {
      return x_i +
             (std::sqrt(std::max(0.0, p_i * p_i + 2 * m * (c - c_i))) - p_i) /
               m;
    }
  } else if (interp_ == Interpolation::log_lin) {
    // Log-linear interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = std::log(p_i1 / p_i) / (x_i1 - x_i);
    double f = (c - c_i) / p_i;
    return x_i + f * log1prel(m * f);
  } else if (interp_ == Interpolation::log_log) {
    // Log-Log interpolation
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    double m = std::log((x_i1 * p_i1) / (x_i * p_i)) / std::log(x_i1 / x_i);
    double f = (c - c_i) / (p_i * x_i);
    return x_i * std::exp(f * log1prel(m * f));
  } else {
    UNREACHABLE();
  }
}
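
// Derivation of the lin_lin branch above: within a bin the PDF is
// p(x) = p_i + m * (x - x_i), so c - c_i = p_i * (x - x_i) + m * (x - x_i)^2 / 2.
// Solving this quadratic for (x - x_i) and keeping the root that lies inside
// the bin gives x = x_i + (sqrt(p_i^2 + 2*m*(c - c_i)) - p_i) / m; the
// std::max(0.0, ...) guards against a slightly negative discriminant from
// round-off.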

double Tabular::evaluate(double x) const
{
  int i;

  if (interp_ == Interpolation::histogram) {
    i = std::upper_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;
    if (i < 0 || i >= static_cast<int>(p_.size())) {
      return 0.0;
    } else {
      return p_[i];
    }
  } else {
    i = std::lower_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;

    if (i < 0 || i >= static_cast<int>(p_.size()) - 1) {
      return 0.0;
    } else {
      double x0 = x_[i];
      double x1 = x_[i + 1];
      double p0 = p_[i];
      double p1 = p_[i + 1];

      double t = (x - x0) / (x1 - x0);
      return (1 - t) * p0 + t * p1;
    }
  }
}

//==============================================================================
// Equiprobable implementation
//==============================================================================

double Equiprobable::sample_unbiased(uint64_t* seed) const
{
  std::size_t n = x_.size();

  double r = prn(seed);
  int i = std::floor((n - 1) * r);

  double xl = x_[i];
  double xr = x_[i + 1];
  return xl + ((n - 1) * r - i) * (xr - xl);
}

double Equiprobable::evaluate(double x) const
{
  double x_min = *std::min_element(x_.begin(), x_.end());
  double x_max = *std::max_element(x_.begin(), x_.end());

  if (x < x_min || x > x_max) {
    return 0.0;
  } else {
    return 1.0 / (x_max - x_min);
  }
}

//==============================================================================
// Mixture implementation
//==============================================================================

Mixture::Mixture(pugi::xml_node node)
{
  vector<double> probabilities;

  // First pass: collect distributions and their probabilities
  for (pugi::xml_node pair : node.children("pair")) {
    // Check that required data exists
    if (!pair.attribute("probability"))
      fatal_error("Mixture pair element does not have probability.");
    if (!pair.child("dist"))
      fatal_error("Mixture pair element does not have a distribution.");

    // Get probability and distribution
    double p = std::stod(pair.attribute("probability").value());
    auto dist = distribution_from_xml(pair.child("dist"));

    // Weight probability by the distribution's integral
    double weighted_prob = p * dist->integral();
    probabilities.push_back(weighted_prob);
    distribution_.push_back(std::move(dist));
  }

  // Save sum of weighted probabilities
  integral_ = std::accumulate(probabilities.begin(), probabilities.end(), 0.0);

  std::size_t n = probabilities.size();

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Mixture distribution with " +
        std::to_string(n) + " components using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights
    weight_ = compute_importance_weights(probabilities, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty
    di_.assign(probabilities);
  }
}

std::pair<double, double> Mixture::sample(uint64_t* seed) const
{
  size_t idx = di_.sample(seed);

  // Sample the chosen distribution
  auto [val, sub_wgt] = distribution_[idx]->sample(seed);

  // Multiply by component selection weight
  double mix_wgt = weight_.empty() ? 1.0 : weight_[idx];
  return {val, mix_wgt * sub_wgt};
}

double Mixture::sample_unbiased(uint64_t* seed) const
{
  size_t idx = di_.sample(seed);
  return distribution_[idx]->sample(seed).first;
}

//==============================================================================
// Helper function
//==============================================================================

UPtrDist distribution_from_xml(pugi::xml_node node)
{
  if (!check_for_node(node, "type"))
    openmc::fatal_error("Distribution type must be specified.");

  // Determine type of distribution
  std::string type = get_node_value(node, "type", true, true);

  // Allocate extension of Distribution
  UPtrDist dist;
  if (type == "uniform") {
    dist = UPtrDist {new Uniform(node)};
  } else if (type == "powerlaw") {
    dist = UPtrDist {new PowerLaw(node)};
  } else if (type == "maxwell") {
    dist = UPtrDist {new Maxwell(node)};
  } else if (type == "watt") {
    dist = UPtrDist {new Watt(node)};
  } else if (type == "normal") {
    dist = UPtrDist {new Normal(node)};
  } else if (type == "discrete") {
    dist = UPtrDist {new Discrete(node)};
  } else if (type == "tabular") {
    dist = UPtrDist {new Tabular(node)};
  } else if (type == "mixture") {
    dist = UPtrDist {new Mixture(node)};
  } else if (type == "muir") {
    openmc::fatal_error(
      "'muir' distributions are now specified using the openmc.stats.muir() "
      "function in Python. Please regenerate your XML files.");
  } else {
    openmc::fatal_error("Invalid distribution type: " + type);
  }
  return dist;
}

} // namespace openmc