openmc-dev / openmc, build 14515233533 (17 Apr 2025 12:06PM UTC)
Pull Request #3087: wheel building with scikit build core (merge 6ed397e9d into 47ca2916a)
Overall coverage: 83.16% (140769 of 169274 relevant lines covered), down 2.3% from 85.414%

Source file: /src/eigenvalue.cpp (file coverage: 95.58%)
#include "openmc/eigenvalue.h"

#include "xtensor/xbuilder.hpp"
#include "xtensor/xmath.hpp"
#include "xtensor/xtensor.hpp"
#include "xtensor/xview.hpp"

#include "openmc/array.h"
#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/error.h"
#include "openmc/hdf5_interface.h"
#include "openmc/ifp.h"
#include "openmc/math_functions.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/random_lcg.h"
#include "openmc/search.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"

#include <algorithm> // for min
#include <cmath>     // for sqrt, abs, pow
#include <iterator>  // for back_inserter
#include <limits>    // for infinity
#include <string>

namespace openmc {

//==============================================================================
// Global variables
//==============================================================================

namespace simulation {

double keff_generation;
array<double, 2> k_sum;
vector<double> entropy;
xt::xtensor<double, 1> source_frac;

} // namespace simulation

//==============================================================================
// Non-member functions
//==============================================================================

void calculate_generation_keff()
{
  const auto& gt = simulation::global_tallies;

  // Get keff for this generation by subtracting off the starting value
  simulation::keff_generation =
    gt(GlobalTally::K_TRACKLENGTH, TallyResult::VALUE) -
    simulation::keff_generation;

  double keff_reduced;
#ifdef OPENMC_MPI
  if (settings::solver_type != SolverType::RANDOM_RAY) {
    // Combine values across all processors
    MPI_Allreduce(&simulation::keff_generation, &keff_reduced, 1, MPI_DOUBLE,
      MPI_SUM, mpi::intracomm);
  } else {
    // If using random ray, MPI parallelism is provided by domain replication.
    // As such, all fluxes will be reduced at the end of each transport sweep,
    // such that all ranks have identical scalar flux vectors, and will all
    // independently compute the same value of k. Thus, there is no need to
    // perform any additional MPI reduction here.
    keff_reduced = simulation::keff_generation;
  }
#else
  keff_reduced = simulation::keff_generation;
#endif

  // Normalize single batch estimate of k
  // TODO: This should be normalized by total_weight, not by n_particles
  if (settings::solver_type != SolverType::RANDOM_RAY) {
    keff_reduced /= settings::n_particles;
  }

  simulation::k_generation.push_back(keff_reduced);
}

void synchronize_bank()
{
  simulation::time_bank.start();

  // In order to properly understand the fission bank algorithm, you need to
  // think of the fission and source bank as being one global array divided
  // over multiple processors. At the start, each processor has a random amount
  // of fission bank sites -- each processor needs to know the total number of
  // sites in order to figure out the probability for selecting
  // sites. Furthermore, each proc also needs to know where in the 'global'
  // fission bank its own sites start in order to ensure reproducibility by
  // skipping ahead to the proper seed.
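  //
  // As a purely illustrative example (the numbers are hypothetical): with
  // three ranks banking 40, 35, and 25 fission sites, the exclusive scan below
  // gives start = 0, 40, 75 on ranks 0, 1, 2, so finish = 40, 75, 100 and the
  // total of 100 is broadcast from the last rank.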

#ifdef OPENMC_MPI
  int64_t start = 0;
  int64_t n_bank = simulation::fission_bank.size();
  MPI_Exscan(&n_bank, &start, 1, MPI_INT64_T, MPI_SUM, mpi::intracomm);

  // While we would expect the value of start on rank 0 to be 0, the MPI
  // standard says that the receive buffer on rank 0 is undefined and not
  // significant
  if (mpi::rank == 0)
    start = 0;

  int64_t finish = start + simulation::fission_bank.size();
  int64_t total = finish;
  MPI_Bcast(&total, 1, MPI_INT64_T, mpi::n_procs - 1, mpi::intracomm);

#else
  int64_t start = 0;
  int64_t finish = simulation::fission_bank.size();
  int64_t total = finish;
#endif

  // If there are not that many particles per generation, it's possible that no
  // fission sites were created at all on a single processor. Rather than add
  // extra logic to treat this circumstance, we really want to ensure the user
  // runs enough particles to avoid this in the first place.

  if (simulation::fission_bank.size() == 0) {
    fatal_error(
      "No fission sites banked on MPI rank " + std::to_string(mpi::rank));
  }

  // Make sure all processors start at the same point for random sampling. Then
  // skip ahead in the sequence using the starting index in the 'global'
  // fission bank for each processor.

  int64_t id = simulation::total_gen + overall_generation();
  uint64_t seed = init_seed(id, STREAM_TRACKING);
  advance_prn_seed(start, &seed);

  // Determine how many fission sites we need to sample from the source bank
  // and the probability for selecting a site.

  int64_t sites_needed;
  if (total < settings::n_particles) {
    sites_needed = settings::n_particles % total;
  } else {
    sites_needed = settings::n_particles;
  }
  double p_sample = static_cast<double>(sites_needed) / total;

  simulation::time_bank_sample.start();

  // ==========================================================================
  // SAMPLE N_PARTICLES FROM FISSION BANK AND PLACE IN TEMP_SITES

  // Allocate temporary source bank -- we don't really know how many fission
  // sites were created, so overallocate by a factor of 3
  int64_t index_temp = 0;

  vector<SourceSite> temp_sites(3 * simulation::work_per_rank);

  // Temporary banks for IFP
  vector<vector<int>> temp_delayed_groups;
  vector<vector<double>> temp_lifetimes;
  if (settings::ifp_on) {
    resize_ifp_data(
      temp_delayed_groups, temp_lifetimes, 3 * simulation::work_per_rank);
  }

  for (int64_t i = 0; i < simulation::fission_bank.size(); i++) {
    const auto& site = simulation::fission_bank[i];

    // If there are fewer than n_particles particles banked, automatically add
    // int(n_particles/total) sites to temp_sites. For example, if you need
    // 1000 and 300 were banked, this would add 3 source sites per banked site
    // and the remaining 100 would be randomly sampled.
    if (total < settings::n_particles) {
      for (int64_t j = 1; j <= settings::n_particles / total; ++j) {
        temp_sites[index_temp] = site;
        if (settings::ifp_on) {
          copy_ifp_data_from_fission_banks(
            i, temp_delayed_groups[index_temp], temp_lifetimes[index_temp]);
        }
        ++index_temp;
      }
    }

    // Randomly sample sites needed
    if (prn(&seed) < p_sample) {
      temp_sites[index_temp] = site;
      if (settings::ifp_on) {
        copy_ifp_data_from_fission_banks(
          i, temp_delayed_groups[index_temp], temp_lifetimes[index_temp]);
      }
      ++index_temp;
    }
  }

  // At this point, the sampling of source sites is done and now we need to
  // figure out where to send source sites. Since it is possible that one
  // processor's share of the source bank spans more than just the immediate
  // neighboring processors, we have to perform an ALLGATHER to determine the
  // indices for all processors

#ifdef OPENMC_MPI
  // First do an exclusive scan to get the starting indices for each processor
  start = 0;
  MPI_Exscan(&index_temp, &start, 1, MPI_INT64_T, MPI_SUM, mpi::intracomm);
  finish = start + index_temp;

  // TODO: protect for MPI_Exscan at rank 0

  // Allocate space for bank_position if this hasn't been done yet
  int64_t bank_position[mpi::n_procs];
  MPI_Allgather(
    &start, 1, MPI_INT64_T, bank_position, 1, MPI_INT64_T, mpi::intracomm);
#else
  start = 0;
  finish = index_temp;
#endif

  // Now that the sampling is complete, we need to ensure that we have exactly
  // n_particles source sites. The way this is done in a reproducible manner is
  // to adjust only the source sites on the last processor.

  if (mpi::rank == mpi::n_procs - 1) {
    if (finish > settings::n_particles) {
      // If we have extra sites sampled, we will simply discard the extra
      // ones on the last processor
      index_temp = settings::n_particles - start;

    } else if (finish < settings::n_particles) {
      // If we have too few sites, repeat sites from the very end of the
      // fission bank
      sites_needed = settings::n_particles - finish;
      // TODO: sites_needed > simulation::fission_bank.size() or other test to
      // make sure we don't need info from other proc
      for (int i = 0; i < sites_needed; ++i) {
        int i_bank = simulation::fission_bank.size() - sites_needed + i;
        temp_sites[index_temp] = simulation::fission_bank[i_bank];
        if (settings::ifp_on) {
          copy_ifp_data_from_fission_banks(i_bank,
            temp_delayed_groups[index_temp], temp_lifetimes[index_temp]);
        }
        ++index_temp;
      }
    }

    // the last processor should not be sending sites to the right
    finish = simulation::work_index[mpi::rank + 1];
  }

  simulation::time_bank_sample.stop();
  simulation::time_bank_sendrecv.start();

#ifdef OPENMC_MPI
  // ==========================================================================
  // SEND BANK SITES TO NEIGHBORS

  // IFP number of generation
  int ifp_n_generation;
  if (settings::ifp_on) {
    broadcast_ifp_n_generation(
      ifp_n_generation, temp_delayed_groups, temp_lifetimes);
  }

  int64_t index_local = 0;
  vector<MPI_Request> requests;

  // IFP send buffers
  vector<int> send_delayed_groups;
  vector<double> send_lifetimes;

  if (start < settings::n_particles) {
    // Determine the index of the processor which has the first part of the
    // source_bank for the local processor
    int neighbor = upper_bound_index(
      simulation::work_index.begin(), simulation::work_index.end(), start);

    // Resize IFP send buffers
    if (settings::ifp_on && mpi::n_procs > 1) {
      resize_ifp_data(send_delayed_groups, send_lifetimes,
        ifp_n_generation * 3 * simulation::work_per_rank);
    }

    while (start < finish) {
      // Determine the number of sites to send
      int64_t n =
        std::min(simulation::work_index[neighbor + 1], finish) - start;

      // Initiate an asynchronous send of source sites to the neighboring
      // process
      if (neighbor != mpi::rank) {
        requests.emplace_back();
        MPI_Isend(&temp_sites[index_local], static_cast<int>(n),
          mpi::source_site, neighbor, mpi::rank, mpi::intracomm,
          &requests.back());

        if (settings::ifp_on) {
          // Send IFP data
          send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
            temp_delayed_groups, send_delayed_groups, temp_lifetimes,
            send_lifetimes);
        }
      }

      // Increment all indices
      start += n;
      index_local += n;
      ++neighbor;

      // Check for sites out of bounds -- this only happens in the rare
      // circumstance that a processor close to the end has so many sites that
      // it would exceed the bank on the last processor
      if (neighbor > mpi::n_procs - 1)
        break;
    }
  }

  // ==========================================================================
  // RECEIVE BANK SITES FROM NEIGHBORS OR TEMPORARY BANK

  start = simulation::work_index[mpi::rank];
  index_local = 0;

  // IFP receive buffers
  vector<int> recv_delayed_groups;
  vector<double> recv_lifetimes;
  vector<DeserializationInfo> deserialization_info;

  // Determine what process has the source sites that will need to be stored at
  // the beginning of this processor's source bank.

  int neighbor;
  if (start >= bank_position[mpi::n_procs - 1]) {
    neighbor = mpi::n_procs - 1;
  } else {
    neighbor =
      upper_bound_index(bank_position, bank_position + mpi::n_procs, start);
  }

  // Resize IFP receive buffers
  if (settings::ifp_on && mpi::n_procs > 1) {
    resize_ifp_data(recv_delayed_groups, recv_lifetimes,
      ifp_n_generation * simulation::work_per_rank);
  }

  while (start < simulation::work_index[mpi::rank + 1]) {
    // Determine how many sites need to be received
    int64_t n;
    if (neighbor == mpi::n_procs - 1) {
      n = simulation::work_index[mpi::rank + 1] - start;
    } else {
      n = std::min(bank_position[neighbor + 1],
            simulation::work_index[mpi::rank + 1]) -
          start;
    }

    if (neighbor != mpi::rank) {
      // If the source sites are not on this processor, initiate an
      // asynchronous receive for the source sites

      requests.emplace_back();
      MPI_Irecv(&simulation::source_bank[index_local], static_cast<int>(n),
        mpi::source_site, neighbor, neighbor, mpi::intracomm, &requests.back());

      if (settings::ifp_on) {
        // Receive IFP data
        receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
          recv_delayed_groups, recv_lifetimes, deserialization_info);
      }

    } else {
      // If the source sites are on this processor, we can simply copy them
      // from the temp_sites bank

      index_temp = start - bank_position[mpi::rank];
      std::copy(&temp_sites[index_temp], &temp_sites[index_temp + n],
        &simulation::source_bank[index_local]);

      if (settings::ifp_on) {
        copy_partial_ifp_data_to_source_banks(
          index_temp, n, index_local, temp_delayed_groups, temp_lifetimes);
      }
    }

    // Increment all indices
    start += n;
    index_local += n;
    ++neighbor;
  }

  // Since we initiated a series of asynchronous ISENDs and IRECVs, now we have
  // to ensure that the data has actually been communicated before moving on to
  // the next generation

  int n_request = requests.size();
  MPI_Waitall(n_request, requests.data(), MPI_STATUSES_IGNORE);

  if (settings::ifp_on) {
    deserialize_ifp_info(ifp_n_generation, deserialization_info,
      recv_delayed_groups, recv_lifetimes);
  }

#else
  std::copy(temp_sites.data(), temp_sites.data() + settings::n_particles,
    simulation::source_bank.begin());
  if (settings::ifp_on) {
    copy_complete_ifp_data_to_source_banks(temp_delayed_groups, temp_lifetimes);
  }
#endif

  simulation::time_bank_sendrecv.stop();
  simulation::time_bank.stop();
}

void calculate_average_keff()
{
  // Determine overall generation and number of active generations
  int i = overall_generation() - 1;
  int n;
  if (simulation::current_batch > settings::n_inactive) {
    n = settings::gen_per_batch * simulation::n_realizations +
        simulation::current_gen;
  } else {
    n = 0;
  }

  if (n <= 0) {
    // For inactive generations, use current generation k as estimate for next
    // generation
    simulation::keff = simulation::k_generation[i];
  } else {
    // Sample mean of keff
    simulation::k_sum[0] += simulation::k_generation[i];
    simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);

    // Determine mean
    simulation::keff = simulation::k_sum[0] / n;

    if (n > 1) {
      double t_value;
      if (settings::confidence_intervals) {
        // Calculate t-value for confidence intervals
        double alpha = 1.0 - CONFIDENCE_LEVEL;
        t_value = t_percentile(1.0 - alpha / 2.0, n - 1);
      } else {
        t_value = 1.0;
      }

      // Standard deviation of the sample mean of k
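      // i.e., keff_std = t_value * sqrt((k_sum[1]/n - keff^2) / (n - 1)), the
      // standard error of the mean scaled by the t-value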
      simulation::keff_std =
        t_value *
        std::sqrt(
          (simulation::k_sum[1] / n - std::pow(simulation::keff, 2)) / (n - 1));
    }
  }
}

int openmc_get_keff(double* k_combined)
{
  k_combined[0] = 0.0;
  k_combined[1] = 0.0;

  // Special case for n <= 3. Notice that at the end,
  // there is an N-3 term in a denominator.
  if (simulation::n_realizations <= 3 ||
      settings::solver_type == SolverType::RANDOM_RAY) {
    k_combined[0] = simulation::keff;
    k_combined[1] = simulation::keff_std;
    if (simulation::n_realizations <= 1) {
      k_combined[1] = std::numeric_limits<double>::infinity();
    }
    return 0;
  }

  // Initialize variables
  int64_t n = simulation::n_realizations;

  // Copy estimates of k-effective and its variance (not variance of the mean)
  const auto& gt = simulation::global_tallies;

  array<double, 3> kv {};
  xt::xtensor<double, 2> cov = xt::zeros<double>({3, 3});
  kv[0] = gt(GlobalTally::K_COLLISION, TallyResult::SUM) / n;
  kv[1] = gt(GlobalTally::K_ABSORPTION, TallyResult::SUM) / n;
  kv[2] = gt(GlobalTally::K_TRACKLENGTH, TallyResult::SUM) / n;
  cov(0, 0) =
    (gt(GlobalTally::K_COLLISION, TallyResult::SUM_SQ) - n * kv[0] * kv[0]) /
    (n - 1);
  cov(1, 1) =
    (gt(GlobalTally::K_ABSORPTION, TallyResult::SUM_SQ) - n * kv[1] * kv[1]) /
    (n - 1);
  cov(2, 2) =
    (gt(GlobalTally::K_TRACKLENGTH, TallyResult::SUM_SQ) - n * kv[2] * kv[2]) /
    (n - 1);

  // Calculate covariances based on sums with Bessel's correction
  cov(0, 1) = (simulation::k_col_abs - n * kv[0] * kv[1]) / (n - 1);
  cov(0, 2) = (simulation::k_col_tra - n * kv[0] * kv[2]) / (n - 1);
  cov(1, 2) = (simulation::k_abs_tra - n * kv[1] * kv[2]) / (n - 1);
  cov(1, 0) = cov(0, 1);
  cov(2, 0) = cov(0, 2);
  cov(2, 1) = cov(1, 2);
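
  // At this point kv holds the mean of each estimator (collision, absorption,
  // track-length) over the n realizations and cov their symmetric 3x3 sample
  // covariance matrix.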

  // Check to see if two estimators are the same; this is guaranteed to happen
  // in MG-mode with survival biasing when the collision and absorption
  // estimators are the same, but can theoretically happen at any time.
  // If it does, the standard estimators will produce floating-point
  // exceptions and an expression specifically derived for the combination of
  // two estimators (vice three) should be used instead.

  // First we will identify if there are any matching estimators
  int i, j;
  bool use_three = false;
  if ((std::abs(kv[0] - kv[1]) / kv[0] < FP_REL_PRECISION) &&
      (std::abs(cov(0, 0) - cov(1, 1)) / cov(0, 0) < FP_REL_PRECISION)) {
    // 0 and 1 match, so only use 0 and 2 in our comparisons
    i = 0;
    j = 2;

  } else if ((std::abs(kv[0] - kv[2]) / kv[0] < FP_REL_PRECISION) &&
             (std::abs(cov(0, 0) - cov(2, 2)) / cov(0, 0) < FP_REL_PRECISION)) {
    // 0 and 2 match, so only use 0 and 1 in our comparisons
    i = 0;
    j = 1;

  } else if ((std::abs(kv[1] - kv[2]) / kv[1] < FP_REL_PRECISION) &&
             (std::abs(cov(1, 1) - cov(2, 2)) / cov(1, 1) < FP_REL_PRECISION)) {
    // 1 and 2 match, so only use 0 and 1 in our comparisons
    i = 0;
    j = 1;

  } else {
    // No two estimators match, so set boolean to use all three estimators.
    use_three = true;
  }

  if (use_three) {
    // Use three estimators as derived in the paper by Urbatsch

    // Initialize variables
    double g = 0.0;
    array<double, 3> S {};

    for (int l = 0; l < 3; ++l) {
      // Permutations of estimates
      int k;
      switch (l) {
      case 0:
        // i = collision, j = absorption, k = tracklength
        i = 0;
        j = 1;
        k = 2;
        break;
      case 1:
        // i = absorption, j = tracklength, k = collision
        i = 1;
        j = 2;
        k = 0;
        break;
      case 2:
        // i = tracklength, j = collision, k = absorption
        i = 2;
        j = 0;
        k = 1;
        break;
      }

      // Calculate weighting
      double f = cov(j, j) * (cov(k, k) - cov(i, k)) - cov(k, k) * cov(i, j) +
                 cov(j, k) * (cov(i, j) + cov(i, k) - cov(j, k));

      // Add to S sums for variance of combined estimate
      S[0] += f * cov(0, l);
      S[1] += (cov(j, j) + cov(k, k) - 2.0 * cov(j, k)) * kv[l] * kv[l];
      S[2] += (cov(k, k) + cov(i, j) - cov(j, k) - cov(i, k)) * kv[l] * kv[j];

      // Add to sum for combined k-effective
      k_combined[0] += f * kv[l];
      g += f;
    }

    // Complete calculations of S sums
    for (auto& S_i : S) {
      S_i *= (n - 1);
    }
    S[0] *= (n - 1) * (n - 1);

    // Calculate combined estimate of k-effective
    k_combined[0] /= g;

    // Calculate standard deviation of combined estimate
    g *= (n - 1) * (n - 1);
    k_combined[1] =
      std::sqrt(S[0] / (g * n * (n - 3)) * (1 + n * ((S[1] - 2 * S[2]) / g)));

  } else {
    // Use only two estimators
    // These equations are derived analogously to that done in the paper by
    // Urbatsch, but are simpler than for the three estimators case since the
    // block matrices of the three estimator equations reduce to scalars here
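    //
    // Read this way, the combination below is the minimum-variance weighting
    // of two correlated estimators: with f = kv[i] - kv[j] and
    // g = cov(i, i) + cov(j, j) - 2*cov(i, j) (the variance of their
    // difference), the combined estimate is kv[i] - (cov(i, i) - cov(i, j)) /
    // g * f.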

    // Store the commonly used term
    double f = kv[i] - kv[j];
    double g = cov(i, i) + cov(j, j) - 2.0 * cov(i, j);

    // Calculate combined estimate of k-effective
    k_combined[0] = kv[i] - (cov(i, i) - cov(i, j)) / g * f;

    // Calculate standard deviation of combined estimate
    k_combined[1] = (cov(i, i) * cov(j, j) - cov(i, j) * cov(i, j)) *
                    (g + n * f * f) / (n * (n - 2) * g * g);
    k_combined[1] = std::sqrt(k_combined[1]);
  }
  return 0;
}

void shannon_entropy()
{
  // Get source weight in each mesh bin
  bool sites_outside;
  xt::xtensor<double, 1> p =
    simulation::entropy_mesh->count_sites(simulation::fission_bank.data(),
      simulation::fission_bank.size(), &sites_outside);

  // display warning message if there were sites outside entropy box
  if (sites_outside) {
    if (mpi::master)
      warning("Fission source site(s) outside of entropy box.");
  }

  if (mpi::master) {
    // Normalize to total weight of bank sites
    p /= xt::sum(p);

    // Sum values to obtain Shannon entropy
1,210✔
637
    for (auto p_i : p) {
221,210✔
638
      if (p_i > 0.0) {
220,000✔
639
        H -= p_i * std::log2(p_i);
136,565✔
640
      }
641
    }
642

643
    // Add value to vector
644
    simulation::entropy.push_back(H);
1,210✔
645
  }
646
}
1,260✔
647

648
void ufs_count_sites()
160✔
649
{
650
  if (simulation::current_batch == 1 && simulation::current_gen == 1) {
160✔
651
    // On the first generation, just assume that the source is already evenly
652
    // distributed so that effectively the production of fission sites is not
653
    // biased
654

655
    std::size_t n = simulation::ufs_mesh->n_bins();
16✔
656
    double vol_frac = simulation::ufs_mesh->volume_frac_;
16✔
657
    simulation::source_frac = xt::xtensor<double, 1>({n}, vol_frac);
16✔
658

659
  } else {
16✔
660
    // count number of source sites in each ufs mesh cell
661
    bool sites_outside;
662
    simulation::source_frac =
663
      simulation::ufs_mesh->count_sites(simulation::source_bank.data(),
288✔
664
        simulation::source_bank.size(), &sites_outside);
288✔
665

666
    // Check for sites outside of the mesh
667
    if (mpi::master && sites_outside) {
144✔
668
      fatal_error("Source sites outside of the UFS mesh!");
×
669
    }
670

671
#ifdef OPENMC_MPI
672
    // Send source fraction to all processors
673
    int n_bins = simulation::ufs_mesh->n_bins();
90✔
674
    MPI_Bcast(
90✔
675
      simulation::source_frac.data(), n_bins, MPI_DOUBLE, 0, mpi::intracomm);
90✔
676
#endif
677

678
    // Normalize to total weight to get fraction of source in each cell
679
    double total = xt::sum(simulation::source_frac)();
144✔
680
    simulation::source_frac /= total;
144✔
681

682
    // Since the total starting weight is not equal to n_particles, we need to
683
    // renormalize the weight of the source sites
684
    for (int i = 0; i < simulation::work_per_rank; ++i) {
99,144✔
685
      simulation::source_bank[i].wgt *= settings::n_particles / total;
99,000✔
686
    }
687
  }
688
}
160✔
689

690
double ufs_get_weight(const Particle& p)
93,456✔
691
{
692
  // Determine indices on ufs mesh for current location
693
  int mesh_bin = simulation::ufs_mesh->get_bin(p.r());
93,456✔
694
  if (mesh_bin < 0) {
93,456✔
695
    p.write_restart();
×
696
    fatal_error("Source site outside UFS mesh!");
×
697
  }
698

699
  if (simulation::source_frac(mesh_bin) != 0.0) {
93,456✔
700
    return simulation::ufs_mesh->volume_frac_ /
50,622✔
701
           simulation::source_frac(mesh_bin);
50,622✔
702
  } else {
703
    return 1.0;
42,834✔
704
  }
705
}
706

707
void write_eigenvalue_hdf5(hid_t group)
2,604✔
708
{
709
  write_dataset(group, "n_inactive", settings::n_inactive);
2,604✔
710
  write_dataset(group, "generations_per_batch", settings::gen_per_batch);
2,604✔
711
  write_dataset(group, "k_generation", simulation::k_generation);
2,604✔
712
  if (settings::entropy_on) {
2,604✔
713
    write_dataset(group, "entropy", simulation::entropy);
154✔
714
  }
715
  write_dataset(group, "k_col_abs", simulation::k_col_abs);
2,604✔
716
  write_dataset(group, "k_col_tra", simulation::k_col_tra);
2,604✔
717
  write_dataset(group, "k_abs_tra", simulation::k_abs_tra);
2,604✔
718
  array<double, 2> k_combined;
719
  openmc_get_keff(k_combined.data());
2,604✔
720
  write_dataset(group, "k_combined", k_combined);
2,604✔
721
}
2,604✔
722

723
void read_eigenvalue_hdf5(hid_t group)
43✔
724
{
725
  read_dataset(group, "generations_per_batch", settings::gen_per_batch);
43✔
726
  int n = simulation::restart_batch * settings::gen_per_batch;
43✔
727
  simulation::k_generation.resize(n);
43✔
728
  read_dataset(group, "k_generation", simulation::k_generation);
43✔
729
  if (settings::entropy_on) {
43✔
730
    read_dataset(group, "entropy", simulation::entropy);
×
731
  }
732
  read_dataset(group, "k_col_abs", simulation::k_col_abs);
43✔
733
  read_dataset(group, "k_col_tra", simulation::k_col_tra);
43✔
734
  read_dataset(group, "k_abs_tra", simulation::k_abs_tra);
43✔
735
}
43✔
736

737
} // namespace openmc