• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 21548169227

31 Jan 2026 05:28PM UTC coverage: 81.982% (+0.02%) from 81.962%
21548169227

Pull #3761

github

web-flow
Merge b501e4fd5 into 7b4617aff
Pull Request #3761: Add truncated normal distribution support

17275 of 24050 branches covered (71.83%)

Branch coverage included in aggregate %.

85 of 88 new or added lines in 3 files covered. (96.59%)

308 existing lines in 25 files now uncovered.

55875 of 65177 relevant lines covered (85.73%)

50211152.52 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

76.6
/src/distribution.cpp
1
#include "openmc/distribution.h"
2

3
#include <algorithm> // for copy
4
#include <array>
5
#include <cmath>     // for sqrt, floor, max
6
#include <iterator>  // for back_inserter
7
#include <numeric>   // for accumulate
8
#include <stdexcept> // for runtime_error
9
#include <string>    // for string, stod
10

11
#include "openmc/constants.h"
12
#include "openmc/error.h"
13
#include "openmc/math_functions.h"
14
#include "openmc/random_dist.h"
15
#include "openmc/random_lcg.h"
16
#include "openmc/xml_interface.h"
17

18
namespace openmc {
19

20
//==============================================================================
21
// Helper function for computing importance weights from biased sampling
22
//==============================================================================
23

24
vector<double> compute_importance_weights(
  const vector<double>& p, const vector<double>& b)
{
  // The importance weight for index i is p_norm[i] / b_norm[i], where each
  // input array is first normalized to sum to unity. A zero bias probability
  // yields an infinite weight (INFTY) so the condition is never hidden.
  std::size_t n = p.size();

  // Totals used to normalize the true and bias probabilities
  double sum_p = std::accumulate(p.begin(), p.end(), 0.0);
  double sum_b = std::accumulate(b.begin(), b.end(), 0.0);

  vector<double> weights(n);
  for (std::size_t i = 0; i < n; ++i) {
    double p_i = p[i] / sum_p;
    double b_i = b[i] / sum_b;
    weights[i] = (b_i == 0.0) ? INFTY : p_i / b_i;
  }
  return weights;
}
50

51
std::pair<double, double> Distribution::sample(uint64_t* seed) const
759,709,880✔
52
{
53
  if (bias_) {
759,709,880✔
54
    // Sample from the bias distribution and compute importance weight
55
    double val = bias_->sample_unbiased(seed);
714,849✔
56
    double wgt = this->evaluate(val) / bias_->evaluate(val);
714,849✔
57
    return {val, wgt};
714,849✔
58
  } else {
59
    // Unbiased sampling: return sampled value with weight 1.0
60
    double val = sample_unbiased(seed);
758,995,031✔
61
    return {val, 1.0};
758,995,031✔
62
  }
63
}
64

65
// PDF evaluation not supported for all distribution types
66
// PDF evaluation not supported for all distribution types
double Distribution::evaluate(double x) const
{
  // Base-class fallback: subclasses that can be used as (or with) a bias
  // distribution override this with a real PDF. Reaching this throw means the
  // distribution type does not support PDF evaluation.
  throw std::runtime_error(
    "PDF evaluation not implemented for this distribution type.");
}
71

72
void Distribution::read_bias_from_xml(pugi::xml_node node)
{
  // Nothing to do when no <bias> element is present
  if (!check_for_node(node, "bias")) {
    return;
  }

  pugi::xml_node bias_node = node.child("bias");

  // Nested biasing is not allowed: a bias distribution may not itself carry
  // its own bias distribution
  if (check_for_node(bias_node, "bias")) {
    openmc::fatal_error(
      "Distribution has a bias distribution with its own bias distribution. "
      "Please ensure bias distributions do not have their own bias.");
  }

  UPtrDist bias = distribution_from_xml(bias_node);
  this->set_bias(std::move(bias));
}
87

88
//==============================================================================
89
// DiscreteIndex implementation
90
//==============================================================================
91

92
DiscreteIndex::DiscreteIndex(pugi::xml_node node)
{
  // The "parameters" array stores n x-values followed by n probabilities;
  // only the probability half (the second half) is relevant here.
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;

  assign({params.data() + n, n});
}
99

100
// Build the alias table directly from a span of (possibly unnormalized)
// probabilities.
DiscreteIndex::DiscreteIndex(span<const double> p)
{
  assign(p);
}
104

105
void DiscreteIndex::assign(span<const double> p)
{
  // Copy the (possibly unnormalized) probabilities and rebuild the alias
  // table used for O(1) sampling
  prob_.assign(p.begin(), p.end());
  this->init_alias();
}
110

111
void DiscreteIndex::init_alias()
83,348✔
112
{
113
  normalize();
83,348✔
114

115
  // The initialization and sampling method is based on Vose
116
  // (DOI: 10.1109/32.92917)
117
  // Vectors for large and small probabilities based on 1/n
118
  vector<size_t> large;
83,348✔
119
  vector<size_t> small;
83,348✔
120

121
  size_t n = prob_.size();
83,348✔
122

123
  // Set and allocate memory
124
  alias_.assign(n, 0);
83,348✔
125

126
  // Fill large and small vectors based on 1/n
127
  for (size_t i = 0; i < n; i++) {
1,638,935✔
128
    prob_[i] *= n;
1,555,587✔
129
    if (prob_[i] > 1.0) {
1,555,587✔
130
      large.push_back(i);
232,943✔
131
    } else {
132
      small.push_back(i);
1,322,644✔
133
    }
134
  }
135

136
  while (!large.empty() && !small.empty()) {
1,481,053✔
137
    int j = small.back();
1,397,705✔
138
    int k = large.back();
1,397,705✔
139

140
    // Remove last element of small
141
    small.pop_back();
1,397,705✔
142

143
    // Update probability and alias based on Vose's algorithm
144
    prob_[k] += prob_[j] - 1.0;
1,397,705✔
145
    alias_[j] = k;
1,397,705✔
146

147
    // Move large index to small vector, if it is no longer large
148
    if (prob_[k] < 1.0) {
1,397,705✔
149
      small.push_back(k);
224,470✔
150
      large.pop_back();
224,470✔
151
    }
152
  }
153
}
83,348✔
154

155
size_t DiscreteIndex::sample(uint64_t* seed) const
67,473,972✔
156
{
157
  // Alias sampling of discrete distribution
158
  size_t n = prob_.size();
67,473,972✔
159
  if (n > 1) {
67,473,972✔
160
    size_t u = prn(seed) * n;
16,640,687✔
161
    if (prn(seed) < prob_[u]) {
16,640,687✔
162
      return u;
9,965,528✔
163
    } else {
164
      return alias_[u];
6,675,159✔
165
    }
166
  } else {
167
    return 0;
50,833,285✔
168
  }
169
}
170

171
void DiscreteIndex::normalize()
83,348✔
172
{
173
  // Renormalize density function so that it sums to unity. Note that we save
174
  // the integral of the distribution so that if it is used as part of another
175
  // distribution (e.g., Mixture), we know its relative strength.
176
  integral_ = std::accumulate(prob_.begin(), prob_.end(), 0.0);
83,348✔
177
  for (auto& p_i : prob_) {
1,638,935✔
178
    p_i /= integral_;
1,555,587✔
179
  }
180
}
83,348✔
181

182
//==============================================================================
183
// Discrete implementation
184
//==============================================================================
185

186
Discrete::Discrete(pugi::xml_node node)
{
  // The "parameters" array stores n x-values followed by n probabilities
  auto params = get_node_array<double>(node, "parameters");
  std::size_t n = params.size() / 2;

  // First half is x values, second half is probabilities
  x_.assign(params.begin(), params.begin() + n);
  const double* p = params.data() + n;

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities; must be one per discrete point
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Discrete distribution with " +
        std::to_string(n) + " probability entries using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights (true probability / bias probability,
    // each normalized)
    vector<double> p_vec(p, p + n);
    weight_ = compute_importance_weights(p_vec, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty and sample() returns weight 1
    di_.assign({p, n});
  }
}
218

219
// Construct from parallel arrays of n x-values and n probabilities (no bias).
Discrete::Discrete(const double* x, const double* p, size_t n) : di_({p, n})
{
  x_.assign(x, x + n);
}
223

224
std::pair<double, double> Discrete::sample(uint64_t* seed) const
62,135,454✔
225
{
226
  size_t idx = di_.sample(seed);
62,135,454✔
227
  double wgt = weight_.empty() ? 1.0 : weight_[idx];
62,135,454✔
228
  return {x_[idx], wgt};
62,135,454✔
229
}
230

231
double Discrete::sample_unbiased(uint64_t* seed) const
×
232
{
233
  size_t idx = di_.sample(seed);
×
234
  return x_[idx];
×
235
}
236

237
//==============================================================================
238
// Uniform implementation
239
//==============================================================================
240

241
Uniform::Uniform(pugi::xml_node node)
{
  // "parameters" must contain exactly the interval bounds [a, b]
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2) {
    fatal_error("Uniform distribution must have two "
                "parameters specified.");
  }

  a_ = params.at(0);
  b_ = params.at(1);

  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
254

255
double Uniform::sample_unbiased(uint64_t* seed) const
781,026✔
256
{
257
  return a_ + prn(seed) * (b_ - a_);
781,026✔
258
}
259

260
double Uniform::evaluate(double x) const
384,849✔
261
{
262
  if (x <= a()) {
384,849!
263
    return 0.0;
×
264
  } else if (x >= b()) {
384,849!
265
    return 0.0;
×
266
  } else {
267
    return 1 / (b() - a());
384,849✔
268
  }
269
}
270

271
//==============================================================================
272
// PowerLaw implementation
273
//==============================================================================
274

275
PowerLaw::PowerLaw(pugi::xml_node node)
{
  // "parameters" must contain the interval bounds a, b and the exponent n
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 3) {
    fatal_error("PowerLaw distribution must have three "
                "parameters specified.");
  }

  const double a = params.at(0);
  const double b = params.at(1);
  const double n = params.at(2);

  // Cache quantities for inverse-CDF sampling of x^n on [a, b]:
  // offset = a^(n+1), span = b^(n+1) - a^(n+1), ninv = 1/(n+1)
  offset_ = std::pow(a, n + 1);
  span_ = std::pow(b, n + 1) - offset_;
  ninv_ = 1 / (n + 1);

  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
293

294
double PowerLaw::evaluate(double x) const
274,849✔
295
{
296
  if (x <= a()) {
274,849!
297
    return 0.0;
×
298
  } else if (x >= b()) {
274,849!
299
    return 0.0;
×
300
  } else {
301
    int pwr = n() + 1;
274,849✔
302
    double norm = pwr / span_;
274,849✔
303
    return norm * std::pow(std::fabs(x), n());
274,849✔
304
  }
305
}
306

307
double PowerLaw::sample_unbiased(uint64_t* seed) const
277,071✔
308
{
309
  return std::pow(offset_ + prn(seed) * span_, ninv_);
277,071✔
310
}
311

312
//==============================================================================
313
// Maxwell implementation
314
//==============================================================================
315

316
Maxwell::Maxwell(pugi::xml_node node)
{
  // Single parameter: the temperature theta of the Maxwellian spectrum
  theta_ = std::stod(get_node_value(node, "parameters"));

  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
322

323
double Maxwell::sample_unbiased(uint64_t* seed) const
{
  // Delegate to the library Maxwellian spectrum sampler with temperature
  // parameter theta
  return maxwell_spectrum(theta_, seed);
}
327

328
double Maxwell::evaluate(double x) const
220,000✔
329
{
330
  double c = (2.0 / SQRT_PI) * std::pow(theta_, -1.5);
220,000✔
331
  return c * std::sqrt(x) * std::exp(-x / theta_);
220,000✔
332
}
333

334
//==============================================================================
335
// Watt implementation
336
//==============================================================================
337

338
Watt::Watt(pugi::xml_node node)
{
  // "parameters" must contain the Watt spectrum parameters a and b
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2)
    openmc::fatal_error("Watt energy distribution must have two "
                        "parameters specified.");

  a_ = params.at(0);
  b_ = params.at(1);

  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
350

351
double Watt::sample_unbiased(uint64_t* seed) const
{
  // Delegate to the library Watt fission spectrum sampler with parameters
  // a and b
  return watt_spectrum(a_, b_, seed);
}
355

356
double Watt::evaluate(double x) const
220,000✔
357
{
358
  double c =
359
    2.0 / (std::sqrt(PI * b_) * std::pow(a_, 1.5) * std::exp(a_ * b_ / 4.0));
220,000✔
360
  return c * std::exp(-x / a_) * std::sinh(std::sqrt(b_ * x));
220,000✔
361
}
362

363
//==============================================================================
364
// Normal implementation
365
//==============================================================================
366

367
// Construct a (possibly truncated) normal distribution directly from its
// parameters; lower/upper are truncation bounds (+/-INFTY means untruncated).
Normal::Normal(double mean_value, double std_dev, double lower, double upper)
  : mean_value_ {mean_value}, std_dev_ {std_dev}, lower_ {lower}, upper_ {upper}
{
  compute_normalization();
}
372

373
Normal::Normal(pugi::xml_node node)
{
  // Two parameters (mean, std_dev) give an untruncated normal; four
  // parameters add explicit truncation bounds (lower, upper)
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() != 2 && params.size() != 4) {
    openmc::fatal_error("Normal energy distribution must have two "
                        "parameters (mean, std_dev) or four parameters "
                        "(mean, std_dev, lower, upper) specified.");
  }

  mean_value_ = params.at(0);
  std_dev_ = params.at(1);

  // Optional truncation bounds
  if (params.size() == 4) {
    lower_ = params.at(2);
    upper_ = params.at(3);
  } else {
    // Infinite bounds mark the distribution as untruncated
    lower_ = -INFTY;
    upper_ = INFTY;
  }

  compute_normalization();
  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
397

398
void Normal::compute_normalization()
{
  // Validate bounds
  if (lower_ >= upper_) {
    openmc::fatal_error(
      "Normal distribution lower bound must be less than upper bound.");
  }

  // Check if truncation bounds are finite
  is_truncated_ = (lower_ > -INFTY || upper_ < INFTY);

  if (is_truncated_) {
    // Probability mass of the untruncated normal lying inside
    // [lower, upper]; its reciprocal rescales the truncated PDF so it
    // integrates to one
    double alpha = (lower_ - mean_value_) / std_dev_;
    double beta = (upper_ - mean_value_) / std_dev_;
    double cdf_diff = standard_normal_cdf(beta) - standard_normal_cdf(alpha);

    if (cdf_diff <= 0.0) {
      openmc::fatal_error(
        "Normal distribution truncation bounds exclude entire distribution.");
    }
    norm_factor_ = 1.0 / cdf_diff;
  } else {
    norm_factor_ = 1.0;
  }
  // NOTE(review): std_dev_ is not validated here; zero or negative values
  // would make alpha/beta ill-defined — confirm it is checked upstream.
}
423

424
double Normal::sample_unbiased(uint64_t* seed) const
330,000✔
425
{
426
  if (!is_truncated_) {
330,000✔
427
    return normal_variate(mean_value_, std_dev_, seed);
220,000✔
428
  }
429

430
  // Rejection sampling for truncated normal
431
  double x;
432
  do {
433
    x = normal_variate(mean_value_, std_dev_, seed);
161,249✔
434
  } while (x < lower_ || x > upper_);
161,249✔
435
  return x;
110,000✔
436
}
437

438
double Normal::evaluate(double x) const
220,121✔
439
{
440
  // Return 0 outside truncation bounds
441
  if (x < lower_ || x > upper_) {
220,121✔
442
    return 0.0;
44✔
443
  }
444

445
  // Standard normal PDF value
446
  double pdf = (1.0 / (std::sqrt(2.0 * PI) * std_dev_)) *
220,077✔
447
               std::exp(-std::pow((x - mean_value_), 2.0) /
220,077✔
448
                        (2.0 * std::pow(std_dev_, 2.0)));
220,077✔
449

450
  // Apply normalization for truncation
451
  return pdf * norm_factor_;
220,077✔
452
}
453

454
//==============================================================================
455
// Tabular implementation
456
//==============================================================================
457

458
Tabular::Tabular(pugi::xml_node node)
{
  // Interpolation scheme defaults to histogram when unspecified
  if (check_for_node(node, "interpolation")) {
    std::string temp = get_node_value(node, "interpolation");
    if (temp == "histogram") {
      interp_ = Interpolation::histogram;
    } else if (temp == "linear-linear") {
      interp_ = Interpolation::lin_lin;
    } else {
      openmc::fatal_error(
        "Unsupported interpolation type for distribution: " + temp);
    }
  } else {
    interp_ = Interpolation::histogram;
  }

  // Read and initialize tabular distribution. If number of parameters is odd,
  // add an extra zero for the 'p' array.
  auto params = get_node_array<double>(node, "parameters");
  if (params.size() % 2 != 0) {
    params.push_back(0.0);
  }
  // First half is x values, second half is PDF values
  std::size_t n = params.size() / 2;
  const double* x = params.data();
  const double* p = x + n;
  init(x, p, n);

  // Optional <bias> child installs a bias distribution for weighted sampling
  read_bias_from_xml(node);
}
487

488
// Construct from raw arrays: n x-values, n PDF values, and optionally a
// precomputed CDF array c of the same length.
Tabular::Tabular(const double* x, const double* p, int n, Interpolation interp,
  const double* c)
  : interp_ {interp}
{
  init(x, p, n, c);
}
494

495
void Tabular::init(
47,499,486✔
496
  const double* x, const double* p, std::size_t n, const double* c)
497
{
498
  // Copy x/p arrays into vectors
499
  std::copy(x, x + n, std::back_inserter(x_));
47,499,486✔
500
  std::copy(p, p + n, std::back_inserter(p_));
47,499,486✔
501

502
  // Check interpolation parameter
503
  if (interp_ != Interpolation::histogram &&
47,499,486✔
504
      interp_ != Interpolation::lin_lin) {
39,610,675!
505
    openmc::fatal_error("Only histogram and linear-linear interpolation "
×
506
                        "for tabular distribution is supported.");
507
  }
508

509
  // Calculate cumulative distribution function
510
  if (c) {
47,499,486✔
511
    std::copy(c, c + n, std::back_inserter(c_));
47,491,681✔
512
  } else {
513
    c_.resize(n);
7,805✔
514
    c_[0] = 0.0;
7,805✔
515
    for (int i = 1; i < n; ++i) {
96,556✔
516
      if (interp_ == Interpolation::histogram) {
88,751✔
517
        c_[i] = c_[i - 1] + p_[i - 1] * (x_[i] - x_[i - 1]);
88,557✔
518
      } else if (interp_ == Interpolation::lin_lin) {
194!
519
        c_[i] = c_[i - 1] + 0.5 * (p_[i - 1] + p_[i]) * (x_[i] - x_[i - 1]);
194✔
520
      }
521
    }
522
  }
523

524
  // Normalize density and distribution functions. Note that we save the
525
  // integral of the distribution so that if it is used as part of another
526
  // distribution (e.g., Mixture), we know its relative strength.
527
  integral_ = c_[n - 1];
47,499,486✔
528
  for (int i = 0; i < n; ++i) {
683,986,760✔
529
    p_[i] = p_[i] / integral_;
636,487,274✔
530
    c_[i] = c_[i] / integral_;
636,487,274✔
531
  }
532
}
47,499,486✔
533

534
double Tabular::sample_unbiased(uint64_t* seed) const
{
  // Sample value of CDF
  double c = prn(seed);

  // Find first CDF bin which is above the sampled value. c_ is
  // non-decreasing, so this linear scan stops at the containing bin, leaving
  // i as the bin index and c_i as the CDF value at the bin's left edge.
  double c_i = c_[0];
  int i;
  std::size_t n = c_.size();
  for (i = 0; i < n - 1; ++i) {
    if (c <= c_[i + 1])
      break;
    c_i = c_[i + 1];
  }

  // Determine bounding PDF values
  double x_i = x_[i];
  double p_i = p_[i];

  if (interp_ == Interpolation::histogram) {
    // Histogram interpolation: invert the linear CDF within the bin
    if (p_i > 0.0) {
      return x_i + (c - c_i) / p_i;
    } else {
      // Zero-density bin: return the left edge to avoid dividing by zero
      return x_i;
    }
  } else {
    // Linear-linear interpolation: invert the quadratic CDF within the bin
    double x_i1 = x_[i + 1];
    double p_i1 = p_[i + 1];

    // Slope of the PDF across the bin
    double m = (p_i1 - p_i) / (x_i1 - x_i);
    if (m == 0.0) {
      // Flat segment reduces to the histogram case
      return x_i + (c - c_i) / p_i;
    } else {
      // Quadratic-formula inversion; max() guards against a slightly
      // negative discriminant from floating-point roundoff
      return x_i +
             (std::sqrt(std::max(0.0, p_i * p_i + 2 * m * (c - c_i))) - p_i) /
               m;
    }
  }
}
575

576
double Tabular::evaluate(double x) const
{
  int i;

  if (interp_ == Interpolation::histogram) {
    // Locate the bin containing x; density is constant within a bin
    i = std::upper_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;
    if (i < 0 || i >= static_cast<int>(p_.size())) {
      // Outside the tabulated range
      return 0.0;
    } else {
      return p_[i];
    }
  } else {
    // NOTE(review): this branch uses lower_bound while the histogram branch
    // uses upper_bound, so values exactly on a grid point resolve to
    // different bins in the two schemes — confirm this is intentional.
    i = std::lower_bound(x_.begin(), x_.end(), x) - x_.begin() - 1;

    if (i < 0 || i >= static_cast<int>(p_.size()) - 1) {
      // Outside the tabulated range (or in the last half-open interval)
      return 0.0;
    } else {
      // Linearly interpolate the density between the bounding grid points
      double x0 = x_[i];
      double x1 = x_[i + 1];
      double p0 = p_[i];
      double p1 = p_[i + 1];

      double t = (x - x0) / (x1 - x0);
      return (1 - t) * p0 + t * p1;
    }
  }
}
603

604
//==============================================================================
605
// Equiprobable implementation
606
//==============================================================================
607

608
double Equiprobable::sample_unbiased(uint64_t* seed) const
×
609
{
610
  std::size_t n = x_.size();
×
611

612
  double r = prn(seed);
×
613
  int i = std::floor((n - 1) * r);
×
614

615
  double xl = x_[i];
×
616
  double xr = x_[i + i];
×
617
  return xl + ((n - 1) * r - i) * (xr - xl);
×
618
}
619

620
double Equiprobable::evaluate(double x) const
×
621
{
622
  double x_min = *std::min_element(x_.begin(), x_.end());
×
623
  double x_max = *std::max_element(x_.begin(), x_.end());
×
624

625
  if (x < x_min || x > x_max) {
×
626
    return 0.0;
×
627
  } else {
628
    return 1.0 / (x_max - x_min);
×
629
  }
630
}
631

632
//==============================================================================
633
// Mixture implementation
634
//==============================================================================
635

636
Mixture::Mixture(pugi::xml_node node)
{
  vector<double> probabilities;

  // First pass: collect distributions and their probabilities
  for (pugi::xml_node pair : node.children("pair")) {
    // Check that required data exists
    if (!pair.attribute("probability"))
      fatal_error("Mixture pair element does not have probability.");
    if (!pair.child("dist"))
      fatal_error("Mixture pair element does not have a distribution.");

    // Get probability and distribution
    double p = std::stod(pair.attribute("probability").value());
    auto dist = distribution_from_xml(pair.child("dist"));

    // Weight probability by the distribution's integral so components with
    // larger intrinsic strength are selected proportionally more often
    double weighted_prob = p * dist->integral();
    probabilities.push_back(weighted_prob);
    distribution_.push_back(std::move(dist));
  }

  // Save sum of weighted probabilities
  integral_ = std::accumulate(probabilities.begin(), probabilities.end(), 0.0);

  std::size_t n = probabilities.size();

  // Check for bias
  if (check_for_node(node, "bias")) {
    // Get bias probabilities; must be one per mixture component
    auto bias_params = get_node_array<double>(node, "bias");
    if (bias_params.size() != n) {
      openmc::fatal_error(
        "Size mismatch: Attempted to bias Mixture distribution with " +
        std::to_string(n) + " components using a bias with " +
        std::to_string(bias_params.size()) +
        " entries. Please ensure distributions have the same size.");
    }

    // Compute importance weights (true probability / bias probability,
    // each normalized)
    weight_ = compute_importance_weights(probabilities, bias_params);

    // Initialize DiscreteIndex with bias probabilities for sampling
    di_.assign(bias_params);
  } else {
    // Unbiased case: weight_ stays empty and sample() returns weight 1
    di_.assign(probabilities);
  }
}
685

686
std::pair<double, double> Mixture::sample(uint64_t* seed) const
223,564✔
687
{
688
  size_t idx = di_.sample(seed);
223,564✔
689

690
  // Sample the chosen distribution
691
  auto [val, sub_wgt] = distribution_[idx]->sample(seed);
223,564✔
692

693
  // Multiply by component selection weight
694
  double mix_wgt = weight_.empty() ? 1.0 : weight_[idx];
223,564!
695
  return {val, mix_wgt * sub_wgt};
223,564✔
696
}
697

698
double Mixture::sample_unbiased(uint64_t* seed) const
{
  // Select a component and return its sampled value, discarding the weight.
  // NOTE(review): this calls the component's sample(), which draws from the
  // component's own bias distribution if it has one — confirm whether
  // sample_unbiased should be used on the component instead.
  size_t idx = di_.sample(seed);
  return distribution_[idx]->sample(seed).first;
}
703

704
//==============================================================================
705
// Helper function
706
//==============================================================================
707

708
// Construct the concrete Distribution subclass named by the node's "type"
// attribute. Fatal error for missing, unknown, or retired types.
UPtrDist distribution_from_xml(pugi::xml_node node)
{
  if (!check_for_node(node, "type"))
    openmc::fatal_error("Distribution type must be specified.");

  // Determine type of distribution
  std::string type = get_node_value(node, "type", true, true);

  // Allocate extension of Distribution
  UPtrDist dist;
  if (type == "uniform") {
    dist = UPtrDist {new Uniform(node)};
  } else if (type == "powerlaw") {
    dist = UPtrDist {new PowerLaw(node)};
  } else if (type == "maxwell") {
    dist = UPtrDist {new Maxwell(node)};
  } else if (type == "watt") {
    dist = UPtrDist {new Watt(node)};
  } else if (type == "normal") {
    dist = UPtrDist {new Normal(node)};
  } else if (type == "discrete") {
    dist = UPtrDist {new Discrete(node)};
  } else if (type == "tabular") {
    dist = UPtrDist {new Tabular(node)};
  } else if (type == "mixture") {
    dist = UPtrDist {new Mixture(node)};
  } else if (type == "muir") {
    // Retired type: give a pointed migration message rather than a generic
    // "invalid type" error
    openmc::fatal_error(
      "'muir' distributions are now specified using the openmc.stats.muir() "
      "function in Python. Please regenerate your XML files.");
  } else {
    openmc::fatal_error("Invalid distribution type: " + type);
  }
  return dist;
}
743

744
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc