• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 22210404096

20 Feb 2026 03:44AM UTC coverage: 81.804% (+0.08%) from 81.721%
22210404096

Pull #3809

github

web-flow
Merge d39f3220e into 53ce1910f
Pull Request #3809: Implement tally filter for filtering by reaction

17328 of 24423 branches covered (70.95%)

Branch coverage included in aggregate %.

125 of 149 new or added lines in 11 files covered. (83.89%)

1322 existing lines in 33 files now uncovered.

57670 of 67257 relevant lines covered (85.75%)

45506622.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.84
/src/eigenvalue.cpp
1
#include "openmc/eigenvalue.h"
2

3
#include "openmc/tensor.h"
4

5
#include "openmc/array.h"
6
#include "openmc/bank.h"
7
#include "openmc/capi.h"
8
#include "openmc/constants.h"
9
#include "openmc/error.h"
10
#include "openmc/hdf5_interface.h"
11
#include "openmc/ifp.h"
12
#include "openmc/math_functions.h"
13
#include "openmc/mesh.h"
14
#include "openmc/message_passing.h"
15
#include "openmc/random_lcg.h"
16
#include "openmc/search.h"
17
#include "openmc/settings.h"
18
#include "openmc/simulation.h"
19
#include "openmc/tallies/tally.h"
20
#include "openmc/timer.h"
21

22
#include <algorithm> // for min
23
#include <cmath>     // for sqrt, abs, pow
24
#include <iterator>  // for back_inserter
25
#include <limits>    //for infinity
26
#include <string>
27

28
namespace openmc {
29

30
//==============================================================================
31
// Global variables
32
//==============================================================================
33

34
namespace simulation {

// Accumulator for the tracklength k estimate of the current generation; holds
// the tally value at generation start and is replaced by the per-generation
// difference in calculate_generation_keff()
double keff_generation;
// Running sums of k and k^2 over active generations, used by
// calculate_average_keff() to form the sample mean and standard deviation
array<double, 2> k_sum;
// Shannon entropy of the fission source, one entry per generation
// (accumulated on the master rank only; see shannon_entropy())
vector<double> entropy;
// Fraction of the source weight in each UFS mesh cell (see ufs_count_sites())
tensor::Tensor<double> source_frac;

} // namespace simulation
42

43
//==============================================================================
44
// Non-member functions
45
//==============================================================================
46

47
void calculate_generation_keff()
85,290✔
48
{
49
  const auto& gt = simulation::global_tallies;
85,290✔
50

51
  // Get keff for this generation by subtracting off the starting value
52
  simulation::keff_generation =
85,290✔
53
    gt(GlobalTally::K_TRACKLENGTH, TallyResult::VALUE) -
85,290✔
54
    simulation::keff_generation;
55

56
  double keff_reduced;
57
#ifdef OPENMC_MPI
58
  if (settings::solver_type != SolverType::RANDOM_RAY) {
42,532✔
59
    // Combine values across all processors
60
    MPI_Allreduce(&simulation::keff_generation, &keff_reduced, 1, MPI_DOUBLE,
39,252✔
61
      MPI_SUM, mpi::intracomm);
62
  } else {
63
    // If using random ray, MPI parallelism is provided by domain replication.
64
    // As such, all fluxes will be reduced at the end of each transport sweep,
65
    // such that all ranks have identical scalar flux vectors, and will all
66
    // independently compute the same value of k. Thus, there is no need to
67
    // perform any additional MPI reduction here.
68
    keff_reduced = simulation::keff_generation;
3,280✔
69
  }
70
#else
71
  keff_reduced = simulation::keff_generation;
42,758✔
72
#endif
73

74
  // Normalize single batch estimate of k
75
  // TODO: This should be normalized by total_weight, not by n_particles
76
  if (settings::solver_type != SolverType::RANDOM_RAY) {
85,290✔
77
    keff_reduced /= settings::n_particles;
78,950✔
78
  }
79

80
  simulation::k_generation.push_back(keff_reduced);
85,290✔
81
}
85,290✔
82

83
void synchronize_bank()
78,950✔
84
{
85
  simulation::time_bank.start();
78,950✔
86

87
  // In order to properly understand the fission bank algorithm, you need to
88
  // think of the fission and source bank as being one global array divided
89
  // over multiple processors. At the start, each processor has a random amount
90
  // of fission bank sites -- each processor needs to know the total number of
91
  // sites in order to figure out the probability for selecting
92
  // sites. Furthermore, each proc also needs to know where in the 'global'
93
  // fission bank its own sites starts in order to ensure reproducibility by
94
  // skipping ahead to the proper seed.
95

96
#ifdef OPENMC_MPI
97
  int64_t start = 0;
39,252✔
98
  int64_t n_bank = simulation::fission_bank.size();
39,252✔
99
  MPI_Exscan(&n_bank, &start, 1, MPI_INT64_T, MPI_SUM, mpi::intracomm);
39,252✔
100

101
  // While we would expect the value of start on rank 0 to be 0, the MPI
102
  // standard says that the receive buffer on rank 0 is undefined and not
103
  // significant
104
  if (mpi::rank == 0)
39,252✔
105
    start = 0;
26,457✔
106

107
  int64_t finish = start + simulation::fission_bank.size();
39,252✔
108
  int64_t total = finish;
39,252✔
109
  MPI_Bcast(&total, 1, MPI_INT64_T, mpi::n_procs - 1, mpi::intracomm);
39,252✔
110

111
#else
112
  int64_t start = 0;
39,698✔
113
  int64_t finish = simulation::fission_bank.size();
39,698✔
114
  int64_t total = finish;
39,698✔
115
#endif
116

117
  // If there are not that many particles per generation, it's possible that no
118
  // fission sites were created at all on a single processor. Rather than add
119
  // extra logic to treat this circumstance, we really want to ensure the user
120
  // runs enough particles to avoid this in the first place.
121

122
  if (simulation::fission_bank.size() == 0) {
78,950!
UNCOV
123
    fatal_error(
×
UNCOV
124
      "No fission sites banked on MPI rank " + std::to_string(mpi::rank));
×
125
  }
126

127
  simulation::time_bank_sample.start();
78,950✔
128

129
  // Allocate temporary source bank -- we don't really know how many fission
130
  // sites were created, so overallocate by a factor of 3
131
  int64_t index_temp = 0;
78,950✔
132

133
  vector<SourceSite> temp_sites(3 * simulation::work_per_rank);
78,950✔
134

135
  // Temporary banks for IFP
136
  vector<vector<int>> temp_delayed_groups;
78,950✔
137
  vector<vector<double>> temp_lifetimes;
78,950✔
138
  if (settings::ifp_on) {
78,950✔
139
    resize_ifp_data(
1,360✔
140
      temp_delayed_groups, temp_lifetimes, 3 * simulation::work_per_rank);
141
  }
142

143
  // ==========================================================================
144
  // SAMPLE N_PARTICLES FROM FISSION BANK AND PLACE IN TEMP_SITES
145

146
  // We use Uniform Combing method to exactly get the targeted particle size
147
  // [https://doi.org/10.1080/00295639.2022.2091906]
148

149
  // Make sure all processors use the same random number seed.
150
  int64_t id = simulation::total_gen + overall_generation();
78,950✔
151
  uint64_t seed = init_seed(id, STREAM_TRACKING);
78,950✔
152

153
  // Comb specification
154
  double teeth_distance = static_cast<double>(total) / settings::n_particles;
78,950✔
155
  double teeth_offset = prn(&seed) * teeth_distance;
78,950✔
156

157
  // First and last hitting tooth
158
  int64_t end = start + simulation::fission_bank.size();
78,950✔
159
  int64_t tooth_start = std::ceil((start - teeth_offset) / teeth_distance);
78,950✔
160
  int64_t tooth_end = std::floor((end - teeth_offset) / teeth_distance) + 1;
78,950✔
161

162
  // Locally comb particles in fission_bank
163
  double tooth = tooth_start * teeth_distance + teeth_offset;
78,950✔
164
  for (int64_t i = tooth_start; i < tooth_end; i++) {
128,645,450✔
165
    int64_t idx = std::floor(tooth) - start;
128,566,500✔
166
    temp_sites[index_temp] = simulation::fission_bank[idx];
128,566,500✔
167
    if (settings::ifp_on) {
128,566,500✔
168
      copy_ifp_data_from_fission_banks(
1,200,000✔
169
        idx, temp_delayed_groups[index_temp], temp_lifetimes[index_temp]);
1,200,000✔
170
    }
171
    ++index_temp;
128,566,500✔
172

173
    // Next tooth
174
    tooth += teeth_distance;
128,566,500✔
175
  }
176

177
  // At this point, the sampling of source sites is done and now we need to
178
  // figure out where to send source sites. Since it is possible that one
179
  // processor's share of the source bank spans more than just the immediate
180
  // neighboring processors, we have to perform an ALLGATHER to determine the
181
  // indices for all processors
182

183
#ifdef OPENMC_MPI
184
  // First do an exclusive scan to get the starting indices for
185
  start = 0;
39,252✔
186
  MPI_Exscan(&index_temp, &start, 1, MPI_INT64_T, MPI_SUM, mpi::intracomm);
39,252✔
187
  finish = start + index_temp;
39,252✔
188

189
  // TODO: protect for MPI_Exscan at rank 0
190

191
  // Allocate space for bank_position if this hasn't been done yet
192
  std::vector<int64_t> bank_position(mpi::n_procs);
39,252✔
193
  MPI_Allgather(&start, 1, MPI_INT64_T, bank_position.data(), 1, MPI_INT64_T,
39,252✔
194
    mpi::intracomm);
195
#else
196
  start = 0;
39,698✔
197
  finish = index_temp;
39,698✔
198
#endif
199

200
  simulation::time_bank_sample.stop();
78,950✔
201
  simulation::time_bank_sendrecv.start();
78,950✔
202

203
#ifdef OPENMC_MPI
204
  // ==========================================================================
205
  // SEND BANK SITES TO NEIGHBORS
206

207
  // IFP number of generation
208
  int ifp_n_generation;
209
  if (settings::ifp_on) {
39,252✔
210
    broadcast_ifp_n_generation(
640✔
211
      ifp_n_generation, temp_delayed_groups, temp_lifetimes);
212
  }
213

214
  int64_t index_local = 0;
39,252✔
215
  vector<MPI_Request> requests;
39,252✔
216

217
  // IFP send buffers
218
  vector<int> send_delayed_groups;
39,252✔
219
  vector<double> send_lifetimes;
39,252✔
220

221
  if (start < settings::n_particles) {
39,252!
222
    // Determine the index of the processor which has the first part of the
223
    // source_bank for the local processor
224
    int neighbor = upper_bound_index(
39,252✔
225
      simulation::work_index.begin(), simulation::work_index.end(), start);
39,252✔
226

227
    // Resize IFP send buffers
228
    if (settings::ifp_on && mpi::n_procs > 1) {
39,252✔
229
      resize_ifp_data(send_delayed_groups, send_lifetimes,
320✔
230
        ifp_n_generation * 3 * simulation::work_per_rank);
320✔
231
    }
232

233
    while (start < finish) {
58,346✔
234
      // Determine the number of sites to send
235
      int64_t n =
236
        std::min(simulation::work_index[neighbor + 1], finish) - start;
51,845✔
237

238
      // Initiate an asynchronous send of source sites to the neighboring
239
      // process
240
      if (neighbor != mpi::rank) {
51,845✔
241
        requests.emplace_back();
12,593✔
242
        MPI_Isend(&temp_sites[index_local], static_cast<int>(n),
12,593✔
243
          mpi::source_site, neighbor, mpi::rank, mpi::intracomm,
244
          &requests.back());
12,593✔
245

246
        if (settings::ifp_on) {
12,593✔
247
          // Send IFP data
248
          if (is_beta_effective_or_both())
160!
249
            send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
160✔
250
              temp_delayed_groups, send_delayed_groups);
251
          if (is_generation_time_or_both())
160!
252
            send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
160✔
253
              temp_lifetimes, send_lifetimes);
254
        }
255
      }
256

257
      // Increment all indices
258
      start += n;
51,845✔
259
      index_local += n;
51,845✔
260
      ++neighbor;
51,845✔
261

262
      // Check for sites out of bounds -- this only happens in the rare
263
      // circumstance that a processor close to the end has so many sites that
264
      // it would exceed the bank on the last processor
265
      if (neighbor > mpi::n_procs - 1)
51,845✔
266
        break;
32,751✔
267
    }
268
  }
269

270
  // ==========================================================================
271
  // RECEIVE BANK SITES FROM NEIGHBORS OR TEMPORARY BANK
272

273
  start = simulation::work_index[mpi::rank];
39,252✔
274
  index_local = 0;
39,252✔
275

276
  // IFP receive buffers
277
  vector<int> recv_delayed_groups;
39,252✔
278
  vector<double> recv_lifetimes;
39,252✔
279
  vector<DeserializationInfo> deserialization_info;
39,252✔
280

281
  // Determine what process has the source sites that will need to be stored at
282
  // the beginning of this processor's source bank.
283

284
  int neighbor;
285
  if (start >= bank_position[mpi::n_procs - 1]) {
39,252✔
286
    neighbor = mpi::n_procs - 1;
20,163✔
287
  } else {
288
    neighbor =
19,089✔
289
      upper_bound_index(bank_position.begin(), bank_position.end(), start);
19,089✔
290
  }
291

292
  // Resize IFP receive buffers
293
  if (settings::ifp_on && mpi::n_procs > 1) {
39,252✔
294
    resize_ifp_data(recv_delayed_groups, recv_lifetimes,
320✔
295
      ifp_n_generation * simulation::work_per_rank);
320✔
296
  }
297

298
  while (start < simulation::work_index[mpi::rank + 1]) {
91,097✔
299
    // Determine how many sites need to be received
300
    int64_t n;
301
    if (neighbor == mpi::n_procs - 1) {
51,845✔
302
      n = simulation::work_index[mpi::rank + 1] - start;
32,756✔
303
    } else {
304
      n = std::min(bank_position[neighbor + 1],
19,089✔
305
            simulation::work_index[mpi::rank + 1]) -
38,178✔
306
          start;
307
    }
308

309
    if (neighbor != mpi::rank) {
51,845✔
310
      // If the source sites are not on this processor, initiate an
311
      // asynchronous receive for the source sites
312

313
      requests.emplace_back();
12,593✔
314
      MPI_Irecv(&simulation::source_bank[index_local], static_cast<int>(n),
12,593✔
315
        mpi::source_site, neighbor, neighbor, mpi::intracomm, &requests.back());
12,593✔
316

317
      if (settings::ifp_on) {
12,593✔
318
        // Receive IFP data
319
        if (is_beta_effective_or_both())
160!
320
          receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
160✔
321
            recv_delayed_groups, deserialization_info);
322
        if (is_generation_time_or_both())
160!
323
          receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
160✔
324
            recv_lifetimes, deserialization_info);
325
      }
326

327
    } else {
328
      // If the source sites are on this processor, we can simply copy them
329
      // from the temp_sites bank
330

331
      index_temp = start - bank_position[mpi::rank];
39,252✔
332
      std::copy(&temp_sites[index_temp], &temp_sites[index_temp + n],
39,252✔
333
        &simulation::source_bank[index_local]);
39,252✔
334

335
      if (settings::ifp_on) {
39,252✔
336
        copy_partial_ifp_data_to_source_banks(
640✔
337
          index_temp, n, index_local, temp_delayed_groups, temp_lifetimes);
338
      }
339
    }
340

341
    // Increment all indices
342
    start += n;
51,845✔
343
    index_local += n;
51,845✔
344
    ++neighbor;
51,845✔
345
  }
346

347
  // Since we initiated a series of asynchronous ISENDs and IRECVs, now we have
348
  // to ensure that the data has actually been communicated before moving on to
349
  // the next generation
350

351
  int n_request = requests.size();
39,252✔
352
  MPI_Waitall(n_request, requests.data(), MPI_STATUSES_IGNORE);
39,252✔
353

354
  if (settings::ifp_on) {
39,252✔
355
    if (is_beta_effective_or_both())
640!
356
      deserialize_ifp_info(ifp_n_generation, recv_delayed_groups,
640✔
357
        simulation::ifp_source_delayed_group_bank, deserialization_info);
358
    if (is_generation_time_or_both())
640!
359
      deserialize_ifp_info(ifp_n_generation, recv_lifetimes,
640✔
360
        simulation::ifp_source_lifetime_bank, deserialization_info);
361
  }
362

363
#else
364
  std::copy(temp_sites.data(), temp_sites.data() + settings::n_particles,
39,698✔
365
    simulation::source_bank.begin());
366
  if (settings::ifp_on) {
39,698✔
367
    copy_complete_ifp_data_to_source_banks(temp_delayed_groups, temp_lifetimes);
720✔
368
  }
369
#endif
370

371
  simulation::time_bank_sendrecv.stop();
78,950✔
372
  simulation::time_bank.stop();
78,950✔
373
}
78,950✔
374

375
void calculate_average_keff()
85,290✔
376
{
377
  // Determine overall generation and number of active generations
378
  int i = overall_generation() - 1;
85,290✔
379
  int n;
380
  if (simulation::current_batch > settings::n_inactive) {
85,290✔
381
    n = settings::gen_per_batch * simulation::n_realizations +
65,028✔
382
        simulation::current_gen;
383
  } else {
384
    n = 0;
20,262✔
385
  }
386

387
  if (n <= 0) {
85,290✔
388
    // For inactive generations, use current generation k as estimate for next
389
    // generation
390
    simulation::keff = simulation::k_generation[i];
20,262✔
391
  } else {
392
    // Sample mean of keff
393
    simulation::k_sum[0] += simulation::k_generation[i];
65,028✔
394
    simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
65,028✔
395

396
    // Determine mean
397
    simulation::keff = simulation::k_sum[0] / n;
65,028✔
398

399
    if (n > 1) {
65,028✔
400
      double t_value;
401
      if (settings::confidence_intervals) {
61,130✔
402
        // Calculate t-value for confidence intervals
403
        double alpha = 1.0 - CONFIDENCE_LEVEL;
98✔
404
        t_value = t_percentile(1.0 - alpha / 2.0, n - 1);
98✔
405
      } else {
406
        t_value = 1.0;
61,032✔
407
      }
408

409
      // Standard deviation of the sample mean of k
410
      simulation::keff_std =
61,130✔
411
        t_value *
61,130✔
412
        std::sqrt(
61,130✔
413
          (simulation::k_sum[1] / n - std::pow(simulation::keff, 2)) / (n - 1));
61,130✔
414

415
      // In some cases (such as an infinite medium problem), random ray
416
      // may estimate k exactly and in an unvarying manner between iterations.
417
      // In this case, the floating point roundoff between the division and the
418
      // power operations may cause an extremely small negative value to occur
419
      // inside the sqrt operation, leading to NaN. If this occurs, we check for
420
      // it and set the std dev to zero.
421
      if (!std::isfinite(simulation::keff_std)) {
61,130!
UNCOV
422
        simulation::keff_std = 0.0;
×
423
      }
424
    }
425
  }
426
}
85,290✔
427

428
//! Compute a combined estimate of k-effective from the collision, absorption,
//! and tracklength estimators, following the minimum-variance combination
//! derived by Urbatsch.
//!
//! \param[out] k_combined Two-element array receiving the combined k estimate
//!   in element 0 and its standard deviation in element 1
//! \return 0 on success
int openmc_get_keff(double* k_combined)
{
  k_combined[0] = 0.0;
  k_combined[1] = 0.0;

  // Special case for n <=3. Notice that at the end,
  // there is a N-3 term in a denominator.
  if (simulation::n_realizations <= 3 ||
      settings::solver_type == SolverType::RANDOM_RAY) {
    // Fall back on the single (tracklength-based) running estimate
    k_combined[0] = simulation::keff;
    k_combined[1] = simulation::keff_std;
    if (simulation::n_realizations <= 1) {
      // A standard deviation is meaningless with one realization or fewer
      k_combined[1] = std::numeric_limits<double>::infinity();
    }
    return 0;
  }

  // Initialize variables
  int64_t n = simulation::n_realizations;

  // Copy estimates of k-effective and its variance (not variance of the mean)
  const auto& gt = simulation::global_tallies;

  // kv holds the three estimator means; cov their 3x3 covariance matrix
  array<double, 3> kv {};
  tensor::Tensor<double> cov = tensor::zeros<double>({3, 3});
  kv[0] = gt(GlobalTally::K_COLLISION, TallyResult::SUM) / n;
  kv[1] = gt(GlobalTally::K_ABSORPTION, TallyResult::SUM) / n;
  kv[2] = gt(GlobalTally::K_TRACKLENGTH, TallyResult::SUM) / n;
  // Diagonal entries: per-estimator sample variances with Bessel's correction
  cov(0, 0) =
    (gt(GlobalTally::K_COLLISION, TallyResult::SUM_SQ) - n * kv[0] * kv[0]) /
    (n - 1);
  cov(1, 1) =
    (gt(GlobalTally::K_ABSORPTION, TallyResult::SUM_SQ) - n * kv[1] * kv[1]) /
    (n - 1);
  cov(2, 2) =
    (gt(GlobalTally::K_TRACKLENGTH, TallyResult::SUM_SQ) - n * kv[2] * kv[2]) /
    (n - 1);

  // Calculate covariances based on sums with Bessel's correction
  cov(0, 1) = (simulation::k_col_abs - n * kv[0] * kv[1]) / (n - 1);
  cov(0, 2) = (simulation::k_col_tra - n * kv[0] * kv[2]) / (n - 1);
  cov(1, 2) = (simulation::k_abs_tra - n * kv[1] * kv[2]) / (n - 1);
  cov(1, 0) = cov(0, 1);
  cov(2, 0) = cov(0, 2);
  cov(2, 1) = cov(1, 2);

  // Check to see if two estimators are the same; this is guaranteed to happen
  // in MG-mode with survival biasing when the collision and absorption
  // estimators are the same, but can theoretically happen at anytime.
  // If it does, the standard estimators will produce floating-point
  // exceptions and an expression specifically derived for the combination of
  // two estimators (vice three) should be used instead.

  // First we will identify if there are any matching estimators
  int i, j;
  bool use_three = false;
  if ((std::abs(kv[0] - kv[1]) / kv[0] < FP_REL_PRECISION) &&
      (std::abs(cov(0, 0) - cov(1, 1)) / cov(0, 0) < FP_REL_PRECISION)) {
    // 0 and 1 match, so only use 0 and 2 in our comparisons
    i = 0;
    j = 2;

  } else if ((std::abs(kv[0] - kv[2]) / kv[0] < FP_REL_PRECISION) &&
             (std::abs(cov(0, 0) - cov(2, 2)) / cov(0, 0) < FP_REL_PRECISION)) {
    // 0 and 2 match, so only use 0 and 1 in our comparisons
    i = 0;
    j = 1;

  } else if ((std::abs(kv[1] - kv[2]) / kv[1] < FP_REL_PRECISION) &&
             (std::abs(cov(1, 1) - cov(2, 2)) / cov(1, 1) < FP_REL_PRECISION)) {
    // 1 and 2 match, so only use 0 and 1 in our comparisons
    i = 0;
    j = 1;

  } else {
    // No two estimators match, so set boolean to use all three estimators.
    use_three = true;
  }

  if (use_three) {
    // Use three estimators as derived in the paper by Urbatsch

    // Initialize variables
    double g = 0.0;
    array<double, 3> S {};

    for (int l = 0; l < 3; ++l) {
      // Permutations of estimates
      int k;
      switch (l) {
      case 0:
        // i = collision, j = absorption, k = tracklength
        i = 0;
        j = 1;
        k = 2;
        break;
      case 1:
        // i = absorption, j = tracklength, k = collision
        i = 1;
        j = 2;
        k = 0;
        break;
      case 2:
        // i = tracklength, j = collision, k = absorption
        i = 2;
        j = 0;
        k = 1;
        break;
      }

      // Calculate weighting
      double f = cov(j, j) * (cov(k, k) - cov(i, k)) - cov(k, k) * cov(i, j) +
                 cov(j, k) * (cov(i, j) + cov(i, k) - cov(j, k));

      // Add to S sums for variance of combined estimate
      S[0] += f * cov(0, l);
      S[1] += (cov(j, j) + cov(k, k) - 2.0 * cov(j, k)) * kv[l] * kv[l];
      S[2] += (cov(k, k) + cov(i, j) - cov(j, k) - cov(i, k)) * kv[l] * kv[j];

      // Add to sum for combined k-effective
      k_combined[0] += f * kv[l];
      g += f;
    }

    // Complete calculations of S sums
    for (auto& S_i : S) {
      S_i *= (n - 1);
    }
    S[0] *= (n - 1) * (n - 1);

    // Calculate combined estimate of k-effective
    k_combined[0] /= g;

    // Calculate standard deviation of combined estimate
    g *= (n - 1) * (n - 1);
    k_combined[1] =
      std::sqrt(S[0] / (g * n * (n - 3)) * (1 + n * ((S[1] - 2 * S[2]) / g)));

  } else {
    // Use only two estimators
    // These equations are derived analogously to that done in the paper by
    // Urbatsch, but are simpler than for the three estimators case since the
    // block matrices of the three estimator equations reduces to scalars here

    // Store the commonly used term
    double f = kv[i] - kv[j];
    double g = cov(i, i) + cov(j, j) - 2.0 * cov(i, j);

    // Calculate combined estimate of k-effective
    k_combined[0] = kv[i] - (cov(i, i) - cov(i, j)) / g * f;

    // Calculate standard deviation of combined estimate
    k_combined[1] = (cov(i, i) * cov(j, j) - cov(i, j) * cov(i, j)) *
                    (g + n * f * f) / (n * (n - 2) * g * g);
    k_combined[1] = std::sqrt(k_combined[1]);
  }
  return 0;
}
586

587
void shannon_entropy()
6,990✔
588
{
589
  // Get source weight in each mesh bin
590
  bool sites_outside;
591
  tensor::Tensor<double> p =
592
    simulation::entropy_mesh->count_sites(simulation::fission_bank.data(),
6,990✔
593
      simulation::fission_bank.size(), &sites_outside);
13,980✔
594

595
  // display warning message if there were sites outside entropy box
596
  if (sites_outside) {
6,990!
UNCOV
597
    if (mpi::master)
×
UNCOV
598
      warning("Fission source site(s) outside of entropy box.");
×
599
  }
600

601
  if (mpi::master) {
6,990✔
602
    // Normalize to total weight of bank sites
603
    p /= p.sum();
6,950✔
604

605
    // Sum values to obtain Shannon entropy
606
    double H = 0.0;
6,950✔
607
    for (auto p_i : p) {
533,050✔
608
      if (p_i > 0.0) {
526,100✔
609
        H -= p_i * std::log2(p_i);
412,450✔
610
      }
611
    }
612

613
    // Add value to vector
614
    simulation::entropy.push_back(H);
6,950✔
615
  }
616
}
6,990✔
617

618
void ufs_count_sites()
140✔
619
{
620
  if (simulation::current_batch == 1 && simulation::current_gen == 1) {
140!
621
    // On the first generation, just assume that the source is already evenly
622
    // distributed so that effectively the production of fission sites is not
623
    // biased
624

625
    std::size_t n = simulation::ufs_mesh->n_bins();
14✔
626
    double vol_frac = simulation::ufs_mesh->volume_frac_;
14✔
627
    simulation::source_frac = tensor::Tensor<double>({n}, vol_frac);
14✔
628

629
  } else {
14✔
630
    // count number of source sites in each ufs mesh cell
631
    bool sites_outside;
632
    simulation::source_frac =
633
      simulation::ufs_mesh->count_sites(simulation::source_bank.data(),
252✔
634
        simulation::source_bank.size(), &sites_outside);
252✔
635

636
    // Check for sites outside of the mesh
637
    if (mpi::master && sites_outside) {
126!
UNCOV
638
      fatal_error("Source sites outside of the UFS mesh!");
×
639
    }
640

641
#ifdef OPENMC_MPI
642
    // Send source fraction to all processors
643
    int n_bins = simulation::ufs_mesh->n_bins();
72✔
644
    MPI_Bcast(
72✔
645
      simulation::source_frac.data(), n_bins, MPI_DOUBLE, 0, mpi::intracomm);
72✔
646
#endif
647

648
    // Normalize to total weight to get fraction of source in each cell
649
    double total = simulation::source_frac.sum();
126✔
650
    simulation::source_frac /= total;
126✔
651

652
    // Since the total starting weight is not equal to n_particles, we need to
653
    // renormalize the weight of the source sites
654
    for (int i = 0; i < simulation::work_per_rank; ++i) {
90,126✔
655
      simulation::source_bank[i].wgt *= settings::n_particles / total;
90,000✔
656
    }
657
  }
658
}
140✔
659

660
double ufs_get_weight(const Particle& p)
85,620✔
661
{
662
  // Determine indices on ufs mesh for current location
663
  int mesh_bin = simulation::ufs_mesh->get_bin(p.r());
85,620✔
664
  if (mesh_bin < 0) {
85,620!
UNCOV
665
    p.write_restart();
×
UNCOV
666
    fatal_error("Source site outside UFS mesh!");
×
667
  }
668

669
  if (simulation::source_frac(mesh_bin) != 0.0) {
85,620✔
670
    return simulation::ufs_mesh->volume_frac_ /
46,450✔
671
           simulation::source_frac(mesh_bin);
46,450✔
672
  } else {
673
    return 1.0;
39,170✔
674
  }
675
}
676

677
//! Write eigenvalue simulation results to an HDF5 group.
//!
//! \param group Open HDF5 group that receives the datasets
void write_eigenvalue_hdf5(hid_t group)
{
  // Run parameters
  write_dataset(group, "n_inactive", settings::n_inactive);
  write_dataset(group, "generations_per_batch", settings::gen_per_batch);

  // Per-generation k estimates and, when enabled, the Shannon entropy series
  write_dataset(group, "k_generation", simulation::k_generation);
  if (settings::entropy_on) {
    write_dataset(group, "entropy", simulation::entropy);
  }

  // Cross products of the three k estimators
  write_dataset(group, "k_col_abs", simulation::k_col_abs);
  write_dataset(group, "k_col_tra", simulation::k_col_tra);
  write_dataset(group, "k_abs_tra", simulation::k_abs_tra);

  // Combined k estimate and its standard deviation
  array<double, 2> k_and_std;
  openmc_get_keff(k_and_std.data());
  write_dataset(group, "k_combined", k_and_std);
}
692

693
//! Restore eigenvalue simulation state from an HDF5 group (restart run).
//!
//! \param group Open HDF5 group containing the datasets written by
//!   write_eigenvalue_hdf5
void read_eigenvalue_hdf5(hid_t group)
{
  // Restore generations per batch, then size k_generation for the number of
  // generations completed up to the restart batch before reading it back
  read_dataset(group, "generations_per_batch", settings::gen_per_batch);
  int n_gen = simulation::restart_batch * settings::gen_per_batch;
  simulation::k_generation.resize(n_gen);
  read_dataset(group, "k_generation", simulation::k_generation);

  // The entropy series is only present when entropy tallying is enabled
  if (settings::entropy_on) {
    read_dataset(group, "entropy", simulation::entropy);
  }

  // Cross products of the three k estimators
  read_dataset(group, "k_col_abs", simulation::k_col_abs);
  read_dataset(group, "k_col_tra", simulation::k_col_tra);
  read_dataset(group, "k_abs_tra", simulation::k_abs_tra);
}
706

707
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc