• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 21034277178

15 Jan 2026 02:11PM UTC coverage: 81.871% (-0.2%) from 82.044%
21034277178

Pull #3702

github

web-flow
Merge d0e725296 into 179048b80
Pull Request #3702: Random Ray Kinetic Simulation Mode

17650 of 24558 branches covered (71.87%)

Branch coverage included in aggregate %.

1008 of 1149 new or added lines in 20 files covered. (87.73%)

166 existing lines in 15 files now uncovered.

56377 of 65861 relevant lines covered (85.6%)

47768466.83 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

75.36
/src/random_ray/flat_source_domain.cpp
1
#include "openmc/random_ray/flat_source_domain.h"
2
#include "openmc/random_ray/bd_utilities.h"
3

4
#include "openmc/cell.h"
5
#include "openmc/constants.h"
6
#include "openmc/eigenvalue.h"
7
#include "openmc/geometry.h"
8
#include "openmc/material.h"
9
#include "openmc/message_passing.h"
10
#include "openmc/mgxs_interface.h"
11
#include "openmc/output.h"
12
#include "openmc/plot.h"
13
#include "openmc/random_ray/random_ray.h"
14
#include "openmc/simulation.h"
15
#include "openmc/tallies/filter.h"
16
#include "openmc/tallies/tally.h"
17
#include "openmc/tallies/tally_scoring.h"
18
#include "openmc/timer.h"
19
#include "openmc/weight_windows.h"
20

21
#include <cmath>
22
#include <cstdio>
23
#include <deque>
24

25
namespace openmc {
26

27
//==============================================================================
28
// FlatSourceDomain implementation
29
//==============================================================================
30

31
// Static Variable Declarations

// Volume estimator used when folding ray-tracing volume tallies into flux
// estimates (see add_source_to_scalar_flux); defaults to HYBRID.
RandomRayVolumeEstimator FlatSourceDomain::volume_estimator_ {
  RandomRayVolumeEstimator::HYBRID};
// When true, flux tallies are divided by tally-region volumes accumulated in
// tally_volumes_ (see reset_tally_volumes and the constructor).
bool FlatSourceDomain::volume_normalized_flux_tallies_ {false};
// When true, the solver runs in adjoint mode (affects fixed-source
// normalization, see compute_fixed_source_normalization_factor).
bool FlatSourceDomain::adjoint_ {false};
// Diagonal stabilization factor; rho = 1.0 means no extra damping applied.
double FlatSourceDomain::diagonal_stabilization_rho_ {1.0};
// Maps a mesh id to the (domain type, domain id) pairs it overlays.
std::unordered_map<int, vector<std::pair<Source::DomainType, int>>>
  FlatSourceDomain::mesh_domain_map_;
39

40
// Constructs the flat source domain: counts source regions per material cell,
// sizes the source region container, optionally allocates per-tally volume
// accumulators, and derives the simulation volume from the ray source box.
FlatSourceDomain::FlatSourceDomain()
  : negroups_(data::mg.num_energy_groups_),
    ndgroups_(data::mg.num_delayed_groups_)
{
  // Count the number of source regions, compute the cell offset
  // indices, and store the material type The reason for the offsets is that
  // some cell types may not have material fills, and therefore do not
  // produce FSRs. Thus, we cannot index into the global arrays directly
  int base_source_regions = 0;
  for (const auto& c : model::cells) {
    if (c->type_ != Fill::MATERIAL) {
      // Non-material cells produce no FSRs; mark with a sentinel offset
      source_region_offsets_.push_back(-1);
    } else {
      source_region_offsets_.push_back(base_source_regions);
      base_source_regions += c->n_instances();
    }
  }

  // Initialize source regions. Linear source shapes require extra per-region
  // moment storage inside the container.
  bool is_linear = RandomRay::source_shape_ != RandomRaySourceShape::FLAT;
  source_regions_ = SourceRegionContainer(negroups_, ndgroups_, is_linear);

  // Initialize tally volumes
  if (volume_normalized_flux_tallies_) {
    tally_volumes_.resize(model::tallies.size());
    for (int i = 0; i < model::tallies.size(); i++) {
      //  Get the shape of the 3D result tensor
      auto shape = model::tallies[i]->results().shape();

      // Create a new 2D tensor with the same size as the first
      // two dimensions of the 3D tensor
      tally_volumes_[i] =
        xt::xtensor<double, 2>::from_shape({shape[0], shape[1]});
    }
  }

  // Compute simulation domain volume based on ray source. The casts below
  // encode requirements on the user input; fail with a clear message rather
  // than dereferencing a null pointer if they are not met.
  auto* is = dynamic_cast<IndependentSource*>(RandomRay::ray_source_.get());
  if (!is) {
    fatal_error("Random ray ray source must be an IndependentSource");
  }
  SpatialDistribution* space_dist = is->space();
  SpatialBox* sb = dynamic_cast<SpatialBox*>(space_dist);
  if (!sb) {
    fatal_error("Random ray ray source must use a box spatial distribution");
  }
  Position dims = sb->upper_right() - sb->lower_left();
  simulation_volume_ = dims.x * dims.y * dims.z;
}
703✔
84

85
void FlatSourceDomain::batch_reset()
104,172✔
86
{
87
// Reset scalar fluxes and iteration volume tallies to zero
88
#pragma omp parallel for
39,072✔
89
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
108,517,075✔
90
    source_regions_.volume(sr) = 0.0;
108,451,975✔
91
    source_regions_.volume_sq(sr) = 0.0;
108,451,975✔
92
  }
93

94
#pragma omp parallel for
39,072✔
95
  for (int64_t se = 0; se < n_source_elements(); se++) {
577,125,895✔
96
    source_regions_.scalar_flux_new(se) = 0.0;
577,060,795✔
97
  }
98

99
  if (settings::kinetic_simulation && settings::create_delayed_neutrons) {
104,172!
100
#pragma omp parallel for
35,700✔
101
    for (int64_t de = 0; de < n_delay_elements(); de++)
607,698,060✔
102
      source_regions_.precursors_new(de) = 0.0;
607,638,560✔
103
  }
104
}
104,172✔
105

106
void FlatSourceDomain::accumulate_iteration_flux()
50,726✔
107
{
108
#pragma omp parallel for
19,026✔
109
  for (int64_t se = 0; se < n_source_elements(); se++) {
284,518,200✔
110
    source_regions_.scalar_flux_final(se) +=
284,486,500✔
111
      source_regions_.scalar_flux_new(se);
284,486,500✔
112
  }
113
}
50,726✔
114

115
// Recomputes the (isotropic, flat) neutron source for a single source region
// from the previous iteration's scalar flux: scattering + (prompt or total)
// fission, plus the delayed-precursor source in kinetic mode, plus any
// external source in fixed-source mode. The result stored in srh.source(g)
// is pre-divided by sigma_t (i.e., it is the "reduced" source).
void FlatSourceDomain::update_single_neutron_source(SourceRegionHandle& srh)
{
  // Reset all source regions to zero (important for void regions)
  for (int g = 0; g < negroups_; g++) {
    srh.source(g) = 0.0;
  }

  // Add scattering + fission source
  int material = srh.material();
  double density_mult = srh.density_mult();
  if (material != MATERIAL_VOID) {
    double inverse_k_eff = 1.0 / k_eff_;
    for (int g_out = 0; g_out < negroups_; g_out++) {
      double sigma_t = sigma_t_[material * negroups_ + g_out] * density_mult;
      double scatter_source = 0.0;
      double fission_source = 0.0;
      double total_source = 0.0;

      // Sum in-scatter and fission contributions over all incoming groups
      for (int g_in = 0; g_in < negroups_; g_in++) {
        double scalar_flux = srh.scalar_flux_old(g_in);
        double sigma_s = sigma_s_[material * negroups_ * negroups_ +
                                  g_out * negroups_ + g_in] *
                         density_mult;
        double nu_sigma_f;
        double chi;
        if (settings::kinetic_simulation && !simulation::is_initial_condition) {
          // Kinetic transient: only prompt fission neutrons are produced
          // directly (delayed neutrons come from the precursor term below)
          nu_sigma_f = nu_p_sigma_f_[material * negroups_ + g_in];
          chi = chi_p_[material * negroups_ + g_out];
        } else {
          // Steady state / initial condition: total nu-fission and chi
          nu_sigma_f = nu_sigma_f_[material * negroups_ + g_in];
          chi = chi_[material * negroups_ + g_out];
        }
        scatter_source += sigma_s * scalar_flux;
        if (settings::create_fission_neutrons) {
          fission_source += nu_sigma_f * density_mult * scalar_flux * chi;
        }
      }
      // Fission source is scaled by 1/k_eff, scattering is not
      total_source = (scatter_source + fission_source * inverse_k_eff);

      // Add delayed source for kinetic simulation if delayed neutrons are
      // turned on
      if (settings::kinetic_simulation && !simulation::is_initial_condition &&
          settings::create_delayed_neutrons) {
        // Delayed emission: sum lambda_dg * C_dg weighted by the delayed
        // spectrum chi_d for this outgoing group
        double delayed_source = 0.0;
        for (int dg = 0; dg < ndgroups_; dg++) {
          double chi_d =
            chi_d_[material * negroups_ * ndgroups_ + dg * negroups_ + g_out];
          double lambda = lambda_[material * ndgroups_ + dg];
          double precursors = srh.precursors_old(dg);
          delayed_source += chi_d * precursors * lambda;
        }
        total_source += delayed_source;
      }
      // Store the reduced source Q/sigma_t
      srh.source(g_out) = total_source / sigma_t;
    }
  }

  // Add external source if in fixed source mode
  if (settings::run_mode == RunMode::FIXED_SOURCE) {
    for (int g = 0; g < negroups_; g++) {
      srh.source(g) += srh.external_source(g);
    }
  }
}
153,739,444✔
179

180
// Compute new estimate of scattering + fission sources in each source region
181
// based on the flux estimate from the previous iteration.
182
void FlatSourceDomain::update_all_neutron_sources()
104,172✔
183
{
184
  simulation::time_update_src.start();
104,172✔
185

186
#pragma omp parallel for
39,072✔
187
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
108,517,075✔
188
    SourceRegionHandle srh = source_regions_.get_source_region_handle(sr);
108,451,975✔
189
    update_single_neutron_source(srh);
108,451,975✔
190
    if (settings::kinetic_simulation && !simulation::is_initial_condition) {
108,451,975✔
191
      if (RandomRay::time_method_ == RandomRayTimeMethod::ISOTROPIC)
54,340,000✔
192
        compute_single_phi_prime(srh);
54,210,000✔
193
      else if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION)
130,000!
194
        compute_single_T1(srh);
130,000✔
195
    }
196
  }
197

198
  simulation::time_update_src.stop();
104,172✔
199
}
104,172✔
200

201
// Normalizes flux and updates simulation-averaged volume estimate
//
// Scalar fluxes are divided by the total active ray distance so they are
// per-unit-track-length. Volume tallies are folded into three estimates:
// the naive (this-iteration) estimate, a running ratio estimate for the
// squared term, and the simulation-averaged estimate.
void FlatSourceDomain::normalize_scalar_flux_and_volumes(
  double total_active_distance_per_iteration)
{
  double normalization_factor = 1.0 / total_active_distance_per_iteration;
  // Averages over all batches accumulated so far
  double volume_normalization_factor =
    1.0 / (total_active_distance_per_iteration * simulation::current_batch);

// Normalize scalar flux to total distance travelled by all rays this
// iteration
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements(); se++) {
    source_regions_.scalar_flux_new(se) *= normalization_factor;
  }

// Accumulate cell-wise ray length tallies collected this iteration, then
// update the simulation-averaged cell-wise volume estimates.
// NOTE: statement order matters here — volume_naive must be computed from
// the raw per-iteration volume() before volume() is overwritten with the
// simulation average on the last line, and volume_sq reads volume_t/
// volume_sq_t only after they have been accumulated.
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    source_regions_.volume_t(sr) += source_regions_.volume(sr);
    source_regions_.volume_sq_t(sr) += source_regions_.volume_sq(sr);
    source_regions_.volume_naive(sr) =
      source_regions_.volume(sr) * normalization_factor;
    source_regions_.volume_sq(sr) =
      source_regions_.volume_sq_t(sr) / source_regions_.volume_t(sr);
    source_regions_.volume(sr) =
      source_regions_.volume_t(sr) * volume_normalization_factor;
  }
}
99,172✔
230

231
// Converts the raw ray-traced flux accumulation for element (sr, g) into the
// final flux estimate for this iteration: divide by (sigma_t * volume) and
// add the reduced source (void regions divide by volume only). In kinetic
// mode, additionally applies the backward-differentiation time correction.
void FlatSourceDomain::set_flux_to_flux_plus_source(
  int64_t sr, double volume, int g)
{
  int material = source_regions_.material(sr);
  double density_mult = source_regions_.density_mult(sr);
  if (material == MATERIAL_VOID) {
    // Void: no attenuation, so only normalize by volume
    source_regions_.scalar_flux_new(sr, g) /= volume;
    if (settings::run_mode == RunMode::FIXED_SOURCE) {
      source_regions_.scalar_flux_new(sr, g) +=
        0.5f * source_regions_.external_source(sr, g) *
        source_regions_.volume_sq(sr);
    }
  } else {
    double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
    source_regions_.scalar_flux_new(sr, g) /= (sigma_t * volume);
    source_regions_.scalar_flux_new(sr, g) += source_regions_.source(sr, g);
  }
  if (settings::kinetic_simulation && !simulation::is_initial_condition) {
    // NOTE(review): this branch indexes inverse_vbar_ and sigma_t_ with
    // `material` even when material == MATERIAL_VOID — presumably kinetic
    // problems contain no void regions yet (see TODO below); verify.
    double inverse_vbar = inverse_vbar_[material * negroups_ + g];
    double scalar_flux_rhs_bd = source_regions_.scalar_flux_rhs_bd(sr, g);
    // Leading BD coefficient divided by the time step size
    double A0 =
      (bd_coefficients_first_order_.at(RandomRay::bd_order_))[0] / settings::dt;
    // TODO: Add support for explicit void regions
    double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
    source_regions_.scalar_flux_new(sr, g) -=
      scalar_flux_rhs_bd * inverse_vbar / sigma_t;
    source_regions_.scalar_flux_new(sr, g) /= 1 + A0 * inverse_vbar / sigma_t;
  }
}
896,549,340✔
260

261
void FlatSourceDomain::set_flux_to_old_flux(int64_t sr, int g)
1,122✔
262
{
263
  source_regions_.scalar_flux_new(sr, g) =
2,244✔
264
    source_regions_.scalar_flux_old(sr, g);
1,122✔
265
}
1,122✔
266

267
void FlatSourceDomain::set_flux_to_source(int64_t sr, int g)
6,770,298✔
268
{
269
  source_regions_.scalar_flux_new(sr, g) = source_regions_.source(sr, g);
6,770,298✔
270
}
6,770,298✔
271

272
// Combine transport flux contributions and flat source contributions from the
// previous iteration to generate this iteration's estimate of scalar flux.
//
// Returns the number of source regions hit by at least one ray this
// iteration. Aborts via fatal_error if any resulting flux is non-finite.
int64_t FlatSourceDomain::add_source_to_scalar_flux()
{
  int64_t n_hits = 0;
  double inverse_batch = 1.0 / simulation::current_batch;

#pragma omp parallel for reduction(+ : n_hits)
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {

    double volume_simulation_avg = source_regions_.volume(sr);
    double volume_iteration = source_regions_.volume_naive(sr);

    // Increment the number of hits if cell was hit this iteration
    if (volume_iteration) {
      n_hits++;
    }

    // Set the SR to small status if its expected number of hits
    // per iteration is less than 1.5
    if (source_regions_.n_hits(sr) * inverse_batch < MIN_HITS_PER_BATCH) {
      source_regions_.is_small(sr) = 1;
    } else {
      source_regions_.is_small(sr) = 0;
    }

    // The volume treatment depends on the volume estimator type
    // and whether or not an external source is present in the cell.
    double volume;
    switch (volume_estimator_) {
    case RandomRayVolumeEstimator::NAIVE:
      volume = volume_iteration;
      break;
    case RandomRayVolumeEstimator::SIMULATION_AVERAGED:
      volume = volume_simulation_avg;
      break;
    case RandomRayVolumeEstimator::HYBRID:
      // Hybrid: use the per-iteration estimate for regions with an external
      // source or too few hits, the simulation average otherwise
      if (source_regions_.external_source_present(sr) ||
          source_regions_.is_small(sr)) {
        volume = volume_iteration;
      } else {
        volume = volume_simulation_avg;
      }
      break;
    default:
      fatal_error("Invalid volume estimator type");
    }

    for (int g = 0; g < negroups_; g++) {
      // There are three scenarios we need to consider:
      if (volume_iteration > 0.0) {
        // 1. If the FSR was hit this iteration, then the new flux is equal to
        // the flat source from the previous iteration plus the contributions
        // from rays passing through the source region (computed during the
        // transport sweep)
        set_flux_to_flux_plus_source(sr, volume, g);
      } else if (volume_simulation_avg > 0.0) {
        // 2. If the FSR was not hit this iteration, but has been hit some
        // previous iteration, then we need to make a choice about what
        // to do. Naively we will usually want to set the flux to be equal
        // to the reduced source. However, in fixed source problems where
        // there is a strong external source present in the cell, and where
        // the cell has a very low cross section, this approximation will
        // cause a huge upward bias in the flux estimate of the cell (in these
        // conditions, the flux estimate can be orders of magnitude too large).
        // Thus, to avoid this bias, if any external source is present
        // in the cell we will use the previous iteration's flux estimate. This
        // injects a small degree of correlation into the simulation, but this
        // is going to be trivial when the miss rate is a few percent or less.
        if (source_regions_.external_source_present(sr)) {
          set_flux_to_old_flux(sr, g);
        } else {
          set_flux_to_source(sr, g);
        }
      }
      // (3. Never-hit regions keep their zeroed flux from batch_reset.)
      // Halt if NaN implosion is detected
      if (!std::isfinite(source_regions_.scalar_flux_new(sr, g))) {
        fatal_error("A source region scalar flux is not finite. "
                    "This indicates a numerical instability in the "
                    "simulation. Consider increasing ray density or adjusting "
                    "the source region mesh.");
      }
    }
  }

  // Return the number of source regions that were hit this iteration
  return n_hits;
}
360

361
// Generates new estimate of k_eff based on the differences between this
// iteration's estimate of the scalar flux and the last iteration's estimate.
//
// Also computes the Shannon entropy of the normalized fission source
// distribution and appends it to simulation::entropy.
void FlatSourceDomain::compute_k_eff()
{
  double fission_rate_old = 0;
  double fission_rate_new = 0;

  // Vector for gathering fission source terms for Shannon entropy calculation
  vector<float> p(n_source_regions(), 0.0f);

#pragma omp parallel for reduction(+ : fission_rate_old, fission_rate_new)
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {

    // If simulation averaged volume is zero, don't include this cell
    double volume = source_regions_.volume(sr);
    if (volume == 0.0) {
      continue;
    }

    // Void regions produce no fission neutrons
    int material = source_regions_.material(sr);
    if (material == MATERIAL_VOID) {
      continue;
    }

    double sr_fission_source_old = 0;
    double sr_fission_source_new = 0;

    // Sum nu-fission reaction rates over all energy groups
    for (int g = 0; g < negroups_; g++) {
      double nu_sigma_f = nu_sigma_f_[material * negroups_ + g] *
                          source_regions_.density_mult(sr);
      sr_fission_source_old +=
        nu_sigma_f * source_regions_.scalar_flux_old(sr, g);
      sr_fission_source_new +=
        nu_sigma_f * source_regions_.scalar_flux_new(sr, g);
    }

    // Compute total fission rates in FSR
    sr_fission_source_old *= volume;
    sr_fission_source_new *= volume;

    // Accumulate totals
    fission_rate_old += sr_fission_source_old;
    fission_rate_new += sr_fission_source_new;

    // Store total fission rate in the FSR for Shannon calculation
    // (stored as float to halve memory traffic in the entropy pass)
    p[sr] = sr_fission_source_new;
  }

  // Power iteration update: scale k by the fission-rate ratio
  double k_eff_new = k_eff_ * (fission_rate_new / fission_rate_old);

  double H = 0.0;
  // defining an inverse sum for better performance
  double inverse_sum = 1 / fission_rate_new;

#pragma omp parallel for reduction(+ : H)
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    // Only if FSR has non-negative and non-zero fission source
    if (p[sr] > 0.0f) {
      // Normalize to total weight of bank sites. p_i for better performance
      float p_i = p[sr] * inverse_sum;
      // Sum values to obtain Shannon entropy.
      H -= p_i * std::log2(p_i);
    }
  }

  // Adds entropy value to shared entropy vector in openmc namespace.
  simulation::entropy.push_back(H);

  fission_rate_ = fission_rate_new;
  k_eff_ = k_eff_new;
}
29,120✔
432

433
// This function is responsible for generating a mapping between random
434
// ray flat source regions (cell instances) and tally bins. The mapping
435
// takes the form of a "TallyTask" object, which accounts for one single
436
// score being applied to a single tally. Thus, a single source region
437
// may have anywhere from zero to many tally tasks associated with it ---
438
// meaning that the global "tally_task" data structure is in 2D. The outer
439
// dimension corresponds to the source element (i.e., each entry corresponds
440
// to a specific energy group within a specific source region), and the
441
// inner dimension corresponds to the tallying task itself. Mechanically,
442
// the mapping between FSRs and spatial filters is done by considering
443
// the location of a single known ray midpoint that passed through the
444
// FSR. I.e., during transport, the first ray to pass through a given FSR
445
// will write down its midpoint for use with this function. This is a cheap
446
// and easy way of mapping FSRs to spatial tally filters, but comes with
447
// the downside of adding the restriction that spatial tally filters must
448
// share boundaries with the physical geometry of the simulation (so as
449
// not to subdivide any FSR). It is acceptable for a spatial tally region
450
// to contain multiple FSRs, but not the other way around.
451

452
// TODO: In future work, it would be preferable to offer a more general
453
// (but perhaps slightly more expensive) option for handling arbitrary
454
// spatial tallies that would be allowed to subdivide FSRs.
455

456
// Besides generating the mapping structure, this function also keeps track
457
// of whether or not all flat source regions have been hit yet. This is
458
// required, as there is no guarantee that all flat source regions will
459
// be hit every iteration, such that in the first few iterations some FSRs
460
// may not have a known position within them yet to facilitate mapping to
461
// spatial tally filters. However, after several iterations, if all FSRs
462
// have been hit and have had a tally map generated, then this status will
463
// be passed back to the caller to alert them that this function doesn't
464
// need to be called for the remainder of the simulation.
465

466
// It takes as an argument the starting index in the source region array,
467
// and it will operate from that index until the end of the array. This
468
// is useful as it can be called for both explicit user source regions or
469
// when a source region mesh is overlaid.
470

471
void FlatSourceDomain::convert_source_regions_to_tallies(int64_t start_sr_id)
689✔
472
{
473
  openmc::simulation::time_tallies.start();
689✔
474

475
  // Tracks if we've generated a mapping yet for all source regions.
476
  bool all_source_regions_mapped = true;
689✔
477

478
// Attempt to generate mapping for all source regions
479
#pragma omp parallel for
259✔
480
  for (int64_t sr = start_sr_id; sr < n_source_regions(); sr++) {
1,059,470✔
481

482
    // If this source region has not been hit by a ray yet, then
483
    // we aren't going to be able to map it, so skip it.
484
    if (!source_regions_.position_recorded(sr)) {
1,059,040!
485
      all_source_regions_mapped = false;
486
      continue;
487
    }
488

489
    // A particle located at the recorded midpoint of a ray
490
    // crossing through this source region is used to estabilish
491
    // the spatial location of the source region
492
    Particle p;
1,059,040✔
493
    p.r() = source_regions_.position(sr);
1,059,040✔
494
    p.r_last() = source_regions_.position(sr);
1,059,040✔
495
    p.u() = {1.0, 0.0, 0.0};
1,059,040✔
496
    bool found = exhaustive_find_cell(p);
1,059,040✔
497

498
    // Loop over energy groups (so as to support energy filters)
499
    for (int g = 0; g < negroups_; g++) {
2,505,680✔
500

501
      // Set particle to the current energy
502
      p.g() = g;
1,446,640✔
503
      p.g_last() = g;
1,446,640✔
504
      p.E() = data::mg.energy_bin_avg_[p.g()];
1,446,640✔
505
      p.E_last() = p.E();
1,446,640✔
506

507
      int64_t source_element = sr * negroups_ + g;
1,446,640✔
508

509
      // If this task has already been populated, we don't need to do
510
      // it again.
511
      if (source_regions_.tally_task(sr, g).size() > 0) {
1,446,640!
512
        continue;
513
      }
514

515
      // Loop over all active tallies. This logic is essentially identical
516
      // to what happens when scanning for applicable tallies during
517
      // MC transport.
518
      for (int i_tally = 0; i_tally < model::tallies.size(); i_tally++) {
5,142,800✔
519
        Tally& tally {*model::tallies[i_tally]};
3,696,160✔
520

521
        // Initialize an iterator over valid filter bin combinations.
522
        // If there are no valid combinations, use a continue statement
523
        // to ensure we skip the assume_separate break below.
524
        auto filter_iter = FilterBinIter(tally, p);
3,696,160✔
525
        auto end = FilterBinIter(tally, true, &p.filter_matches());
3,696,160✔
526
        if (filter_iter == end)
3,696,160✔
527
          continue;
2,001,440✔
528

529
        // Loop over filter bins.
530
        for (; filter_iter != end; ++filter_iter) {
3,389,440✔
531
          auto filter_index = filter_iter.index_;
1,694,720✔
532
          auto filter_weight = filter_iter.weight_;
1,694,720✔
533

534
          // Loop over scores
535
          for (int score = 0; score < tally.scores_.size(); score++) {
3,915,840✔
536
            auto score_bin = tally.scores_[score];
2,410,400✔
537
            // Skip precursor score. These must be scored by delay group via
538
            // tally_delay_task.
539
            if (score_bin == SCORE_PRECURSORS)
2,410,400✔
540
              break;
189,280✔
541
            // If a valid tally, filter, and score combination has been found,
542
            // then add it to the list of tally tasks for this source element.
543
            TallyTask task(i_tally, filter_index, score, score_bin);
2,221,120✔
544
            source_regions_.tally_task(sr, g).push_back(task);
2,221,120✔
545

546
            // Also add this task to the list of volume tasks for this source
547
            // region.
548
            source_regions_.volume_task(sr).insert(task);
2,221,120✔
549
          }
550
        }
551
      }
552
      // Reset all the filter matches for the next tally event.
553
      for (auto& match : p.filter_matches())
5,787,680✔
554
        match.bins_present_ = false;
4,341,040✔
555
    }
556
    // Loop over delayed groups (so as to support tallying delayed quantities)
557
    if (settings::kinetic_simulation && settings::create_delayed_neutrons) {
1,059,040!
558
      for (int dg = 0; dg < ndgroups_; dg++) {
272,480✔
559

560
        // Set particle to the current delay group
561
        p.delayed_group() = dg;
241,280✔
562

563
        int64_t delay_element = sr * ndgroups_ + dg;
241,280✔
564

565
        // If this task has already been populated, we don't need to do
566
        // it again.
567
        if (source_regions_.tally_delay_task(sr, dg).size() > 0) {
241,280!
568
          continue;
569
        }
570

571
        // Loop over all active tallies. This logic is essentially identical
572
        // to what happens when scanning for applicable tallies during
573
        // MC transport.
574
        // Loop over all active tallies. This logic is essentially identical
575
        for (int i_tally = 0; i_tally < model::tallies.size(); i_tally++) {
673,920✔
576
          Tally& tally {*model::tallies[i_tally]};
432,640✔
577

578
          // Initialize an iterator over valid filter bin combinations.
579
          // If there are no valid combinations, use a continue statement
580
          // to ensure we skip the assume_separate break below.
581
          auto filter_iter = FilterBinIter(tally, p);
432,640✔
582
          auto end = FilterBinIter(tally, true, &p.filter_matches());
432,640✔
583
          if (filter_iter == end)
432,640!
584
            continue;
585

586
          // Loop over filter bins.
587
          for (; filter_iter != end; ++filter_iter) {
865,280✔
588
            auto filter_index = filter_iter.index_;
432,640✔
589
            auto filter_weight = filter_iter.weight_;
432,640✔
590

591
            // Loop over scores
592
            for (int score = 0; score < tally.scores_.size(); score++) {
648,960✔
593
              auto score_bin = tally.scores_[score];
432,640✔
594
              // We only want to score precursors
595
              if (score_bin != SCORE_PRECURSORS)
432,640✔
596
                break;
216,320✔
597
              // If a valid tally, filter, and score combination has been found,
598
              // then add it to the list of tally tasks for this source element.
599
              TallyTask task(i_tally, filter_index + dg, score, score_bin);
216,320✔
600
              source_regions_.tally_delay_task(sr, dg).push_back(task);
216,320✔
601

602
              // Also add this task to the list of volume tasks for this source
603
              // region.
604
              source_regions_.volume_task(sr).insert(task);
216,320✔
605
            }
606
          }
607
        }
608
        // Reset all the filter matches for the next tally event.
609
        for (auto& match : p.filter_matches())
882,560✔
610
          match.bins_present_ = false;
641,280✔
611
      }
612
    }
613
  }
1,059,040✔
614
  openmc::simulation::time_tallies.stop();
689✔
615

616
  mapped_all_tallies_ = all_source_regions_mapped;
689✔
617
}
689✔
618

619
// Set the volume accumulators to zero for all tallies
620
void FlatSourceDomain::reset_tally_volumes()
50,726✔
621
{
622
  if (volume_normalized_flux_tallies_) {
50,726✔
623
#pragma omp parallel for
17,880✔
624
    for (int i = 0; i < tally_volumes_.size(); i++) {
90,750✔
625
      auto& tensor = tally_volumes_[i];
60,950✔
626
      tensor.fill(0.0); // Set all elements of the tensor to 0.0
60,950✔
627
    }
628
  }
629
}
50,726✔
630

631
// In fixed source mode, due to the way that volumetric fixed sources are
// converted and applied as volumetric sources in one or more source regions,
// we need to perform an additional normalization step to ensure that the
// reported scalar fluxes are in units per source neutron. This allows for
// direct comparison of reported tallies to Monte Carlo flux results.
// This factor needs to be computed at each iteration, as it is based on the
// volume estimate of each FSR, which improves over the course of the
// simulation
double FlatSourceDomain::compute_fixed_source_normalization_factor() const
{
  // Eigenvalue mode normalization
  if (settings::run_mode == RunMode::EIGENVALUE) {
    // Normalize fluxes by total number of fission neutrons produced. This
    // ensures consistent scaling of the eigenvector such that its magnitude is
    // comparable to the eigenvector produced by the Monte Carlo solver.
    // Multiplying by the eigenvalue is unintuitive, but it is necessary.
    // If the eigenvalue is 1.2, per starting source neutron, you will
    // generate 1.2 neutrons. Thus if we normalize to generating only ONE
    // neutron in total for the whole domain, then we don't actually have enough
    // flux to generate the required 1.2 neutrons. We only know the flux
    // required to generate 1 neutron (which would have required less than one
    // starting neutron). Thus, you have to scale the flux up by the eigenvalue
    // such that 1.2 neutrons are generated, so as to be consistent with the
    // bookkeeping in MC which is all done per starting source neutron (not per
    // neutron produced).
    return k_eff_ / (fission_rate_ * simulation_volume_);
  }

  // If we are in adjoint mode of a fixed source problem, the external
  // source is already normalized, such that all resulting fluxes are
  // also normalized.
  if (adjoint_) {
    return 1.0;
  }

  // Fixed source mode normalization

  // Step 1 is to sum over all source regions and energy groups to get the
  // total external source strength in the simulation.
  double simulation_external_source_strength = 0.0;
#pragma omp parallel for reduction(+ : simulation_external_source_strength)
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    int material = source_regions_.material(sr);
    // Convert the FSR's fractional volume into a physical volume in cm^3
    double volume = source_regions_.volume(sr) * simulation_volume_;
    for (int g = 0; g < negroups_; g++) {
      // For non-void regions, we store the external source pre-divided by
      // sigma_t. We need to multiply non-void regions back up by sigma_t
      // to get the total source strength in the expected units.
      double sigma_t = 1.0;
      if (material != MATERIAL_VOID) {
        sigma_t =
          sigma_t_[material * negroups_ + g] * source_regions_.density_mult(sr);
      }
      simulation_external_source_strength +=
        source_regions_.external_source(sr, g) * sigma_t * volume;
    }
  }

  // Step 2 is to determine the total user-specified external source strength
  double user_external_source_strength = 0.0;
  for (auto& ext_source : model::external_sources) {
    user_external_source_strength += ext_source->strength();
  }

  // The correction factor is the ratio of the user-specified external source
  // strength to the simulation external source strength.
  // NOTE(review): if no external source has yet been discovered/applied,
  // simulation_external_source_strength could be zero, yielding inf —
  // presumably callers guarantee at least one applied source; verify.
  double source_normalization_factor =
    user_external_source_strength / simulation_external_source_strength;

  return source_normalization_factor;
}
702

703
// Tallying in random ray is not done directly during transport, rather,
// it is done only once after each power iteration. This is made possible
// by way of a mapping data structure that relates spatial source regions
// (FSRs) to tally/filter/score combinations. The mechanism by which the
// mapping is done (and the limitations incurred) is documented in the
// "convert_source_regions_to_tallies()" function comments above. The present
// tally function simply traverses the mapping data structure and executes
// the scoring operations to OpenMC's native tally result arrays.

// TODO: Add support for prompt and delayed nu fission tallies
void FlatSourceDomain::random_ray_tally()
{
  openmc::simulation::time_tallies.start();

  // Reset our tally volumes to zero
  reset_tally_volumes();

  double source_normalization_factor =
    compute_fixed_source_normalization_factor();

// We loop over all source regions and energy groups. For each
// element, we check if there are any scores needed and apply
// them.
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    // The fsr.volume_ is the unitless fractional simulation averaged volume
    // (i.e., it is the FSR's fraction of the overall simulation volume). The
    // simulation_volume_ is the total 3D physical volume in cm^3 of the
    // entire global simulation domain (as defined by the ray source box).
    // Thus, the FSR's true 3D spatial volume in cm^3 is found by multiplying
    // its fraction of the total volume by the total volume. Not important in
    // eigenvalue solves, but useful in fixed source solves for returning the
    // flux shape with a magnitude that makes sense relative to the fixed
    // source strength.
    double volume = source_regions_.volume(sr) * simulation_volume_;

    int material = source_regions_.material(sr);
    double density_mult = source_regions_.density_mult(sr);

    for (int g = 0; g < negroups_; g++) {
      double flux =
        source_regions_.scalar_flux_new(sr, g) * source_normalization_factor;

      // Determine numerical score value
      for (auto& task : source_regions_.tally_task(sr, g)) {
        double score = 0.0;
        switch (task.score_type) {

        case SCORE_FLUX:
          score = flux * volume;
          break;

        case SCORE_TOTAL:
          if (material != MATERIAL_VOID) {
            score =
              flux * volume * sigma_t_[material * negroups_ + g] * density_mult;
          }
          break;

        case SCORE_FISSION:
          if (material != MATERIAL_VOID) {
            score =
              flux * volume * sigma_f_[material * negroups_ + g] * density_mult;
          }
          break;

        case SCORE_NU_FISSION:
          if (material != MATERIAL_VOID) {
            score = flux * volume * nu_sigma_f_[material * negroups_ + g] *
                    density_mult;
          }
          break;

        case SCORE_EVENTS:
          score = 1.0;
          break;

        case SCORE_PRECURSORS:
          // Score precursors in tally_delay_tasks: precursor scoring is
          // handled in the delay-group loop below, so the score here remains
          // 0.0 and the atomic add is a no-op for valid configurations.
          if (settings::kinetic_simulation &&
              settings::create_delayed_neutrons) {
            break;
          } else {
            fatal_error("Invalid score specified in tallies.xml. Precursors "
                        "are only supported in random ray mode for kinetic "
                        "simulations when delayed neutrons are turned on.");
          }

        default:
          fatal_error("Invalid score specified in tallies.xml. Only flux, "
                      "total, fission, nu-fission, and events are supported in "
                      "random ray mode (precursors are supported in kinetic "
                      "simulations when delayed neutrons are turned on).");
          break;
        }
        // Apply score to the appropriate tally bin
        Tally& tally {*model::tallies[task.tally_idx]};
#pragma omp atomic
        tally.results_(task.filter_idx, task.score_idx, TallyResult::VALUE) +=
          score;
      }
    }
    // Delayed-neutron precursor tallies are stored per delay group rather
    // than per energy group, so they are scored in a separate task list.
    if (settings::kinetic_simulation && settings::create_delayed_neutrons) {
      for (int dg = 0; dg < ndgroups_; dg++) {
        // Determine numerical score value
        for (auto& task : source_regions_.tally_delay_task(sr, dg)) {
          double score = 0.0;
          switch (task.score_type) {

          // Certain scores already tallied
          case SCORE_FLUX:
          case SCORE_TOTAL:
          case SCORE_FISSION:
          case SCORE_NU_FISSION:
          case SCORE_EVENTS:
            break;

          case SCORE_PRECURSORS:
            score = source_regions_.precursors_new(sr, dg) *
                    source_normalization_factor * volume;
            break;

          default:
            fatal_error(
              "Invalid score specified in tallies.xml. Only flux, "
              "total, fission, nu-fission, and events are supported in "
              "random ray mode (precursors are supported in kinetic "
              "simulations when delayed neutrons are turned on).");
            break;
          }

          // Apply score to the appropriate tally bin
          Tally& tally {*model::tallies[task.tally_idx]};
#pragma omp atomic
          tally.results_(task.filter_idx, task.score_idx, TallyResult::VALUE) +=
            score;
        }
      }
    }

    // For flux and precursor tallies, the total volume of the spatial region is
    // needed for normalizing the tally. We store this volume in a separate
    // tensor. We only contribute to each volume tally bin once per FSR.
    if (volume_normalized_flux_tallies_) {
      for (const auto& task : source_regions_.volume_task(sr)) {
        if (task.score_type == SCORE_FLUX ||
            task.score_type == SCORE_PRECURSORS) {
#pragma omp atomic
          tally_volumes_[task.tally_idx](task.filter_idx, task.score_idx) +=
            volume;
        }
      }
    }
  } // end FSR loop

  // Normalize any flux or precursor scores by the total volume of the FSRs
  // scoring to that bin. To do this, we loop over all tallies, and then all
  // filter bins, and then scores. For each score, we check the tally data
  // structure to see what index that score corresponds to. If that score is a
  // flux or precursor score, then we divide it by volume.
  if (volume_normalized_flux_tallies_) {
    for (int i = 0; i < model::tallies.size(); i++) {
      Tally& tally {*model::tallies[i]};
#pragma omp parallel for
      for (int bin = 0; bin < tally.n_filter_bins(); bin++) {
        for (int score_idx = 0; score_idx < tally.n_scores(); score_idx++) {
          auto score_type = tally.scores_[score_idx];
          if (score_type == SCORE_FLUX || score_type == SCORE_PRECURSORS) {
            double vol = tally_volumes_[i](bin, score_idx);
            if (vol > 0.0) {
              tally.results_(bin, score_idx, TallyResult::VALUE) /= vol;
            }
          }
        }
      }
    }
  }

  openmc::simulation::time_tallies.stop();
}
883

884
double FlatSourceDomain::evaluate_flux_at_point(
×
885
  Position r, int64_t sr, int g) const
886
{
887
  return source_regions_.scalar_flux_final(sr, g) /
×
888
         (settings::n_batches - settings::n_inactive);
×
889
}
890

891
// Outputs all basic material, FSR ID, multigroup flux, and
892
// fission source data to .vtk file that can be directly
893
// loaded and displayed by Paraview. Note that .vtk binary
894
// files require big endian byte ordering, so endianness
895
// is checked and flipped if necessary.
896
void FlatSourceDomain::output_to_vtk() const
×
897
{
898
  // Rename .h5 plot filename(s) to .vtk filenames
899
  for (int p = 0; p < model::plots.size(); p++) {
×
900
    PlottableInterface* plot = model::plots[p].get();
×
901
    plot->path_plot() =
×
902
      plot->path_plot().substr(0, plot->path_plot().find_last_of('.')) + ".vtk";
×
903
  }
904

905
  // Print header information
906
  print_plot();
×
907

908
  // Outer loop over plots
909
  for (int plt = 0; plt < model::plots.size(); plt++) {
×
910

911
    // Get handle to OpenMC plot object and extract params
912
    Plot* openmc_plot = dynamic_cast<Plot*>(model::plots[plt].get());
×
913

914
    // Random ray plots only support voxel plots
915
    if (!openmc_plot) {
×
916
      warning(fmt::format("Plot {} is invalid plot type -- only voxel plotting "
×
917
                          "is allowed in random ray mode.",
918
        plt));
919
      continue;
×
920
    } else if (openmc_plot->type_ != Plot::PlotType::voxel) {
×
921
      warning(fmt::format("Plot {} is invalid plot type -- only voxel plotting "
×
922
                          "is allowed in random ray mode.",
923
        plt));
924
      continue;
×
925
    }
926

927
    int Nx = openmc_plot->pixels_[0];
×
928
    int Ny = openmc_plot->pixels_[1];
×
929
    int Nz = openmc_plot->pixels_[2];
×
930
    Position origin = openmc_plot->origin_;
×
931
    Position width = openmc_plot->width_;
×
932
    Position ll = origin - width / 2.0;
×
933
    double x_delta = width.x / Nx;
×
934
    double y_delta = width.y / Ny;
×
935
    double z_delta = width.z / Nz;
×
936
    std::string filename = openmc_plot->path_plot();
×
937

938
    // Perform sanity checks on file size
939
    uint64_t bytes = Nx * Ny * Nz * (negroups_ + 1 + 1 + 1) * sizeof(float);
×
940
    write_message(5, "Processing plot {}: {}... (Estimated size is {} MB)",
×
941
      openmc_plot->id(), filename, bytes / 1.0e6);
×
942
    if (bytes / 1.0e9 > 1.0) {
×
943
      warning("Voxel plot specification is very large (>1 GB). Plotting may be "
×
944
              "slow.");
945
    } else if (bytes / 1.0e9 > 100.0) {
×
946
      fatal_error("Voxel plot specification is too large (>100 GB). Exiting.");
×
947
    }
948

949
    // Relate voxel spatial locations to random ray source regions
950
    vector<int> voxel_indices(Nx * Ny * Nz);
×
951
    vector<Position> voxel_positions(Nx * Ny * Nz);
×
952
    vector<double> weight_windows(Nx * Ny * Nz);
×
953
    float min_weight = 1e20;
×
954
#pragma omp parallel for collapse(3) reduction(min : min_weight)
955
    for (int z = 0; z < Nz; z++) {
×
956
      for (int y = 0; y < Ny; y++) {
×
957
        for (int x = 0; x < Nx; x++) {
×
958
          Position sample;
959
          sample.z = ll.z + z_delta / 2.0 + z * z_delta;
960
          sample.y = ll.y + y_delta / 2.0 + y * y_delta;
961
          sample.x = ll.x + x_delta / 2.0 + x * x_delta;
962
          Particle p;
×
963
          p.r() = sample;
964
          p.r_last() = sample;
965
          p.E() = 1.0;
966
          p.E_last() = 1.0;
967
          p.u() = {1.0, 0.0, 0.0};
968

969
          bool found = exhaustive_find_cell(p);
×
970
          if (!found) {
×
971
            voxel_indices[z * Ny * Nx + y * Nx + x] = -1;
972
            voxel_positions[z * Ny * Nx + y * Nx + x] = sample;
973
            weight_windows[z * Ny * Nx + y * Nx + x] = 0.0;
974
            continue;
975
          }
976

977
          SourceRegionKey sr_key = lookup_source_region_key(p);
×
978
          int64_t sr = -1;
979
          auto it = source_region_map_.find(sr_key);
×
980
          if (it != source_region_map_.end()) {
×
981
            sr = it->second;
982
          }
983

984
          voxel_indices[z * Ny * Nx + y * Nx + x] = sr;
985
          voxel_positions[z * Ny * Nx + y * Nx + x] = sample;
986

987
          if (variance_reduction::weight_windows.size() == 1) {
×
988
            WeightWindow ww =
989
              variance_reduction::weight_windows[0]->get_weight_window(p);
×
990
            float weight = ww.lower_weight;
991
            weight_windows[z * Ny * Nx + y * Nx + x] = weight;
992
            if (weight < min_weight)
×
993
              min_weight = weight;
994
          }
995
        }
×
996
      }
997
    }
998

999
    double source_normalization_factor =
1000
      compute_fixed_source_normalization_factor();
×
1001

1002
    // Open file for writing
1003
    std::FILE* plot = std::fopen(filename.c_str(), "wb");
×
1004

1005
    // Write vtk metadata
1006
    std::fprintf(plot, "# vtk DataFile Version 2.0\n");
×
1007
    std::fprintf(plot, "Dataset File\n");
×
1008
    std::fprintf(plot, "BINARY\n");
×
1009
    std::fprintf(plot, "DATASET STRUCTURED_POINTS\n");
×
1010
    std::fprintf(plot, "DIMENSIONS %d %d %d\n", Nx, Ny, Nz);
×
1011
    std::fprintf(plot, "ORIGIN %lf %lf %lf\n", ll.x, ll.y, ll.z);
×
1012
    std::fprintf(plot, "SPACING %lf %lf %lf\n", x_delta, y_delta, z_delta);
×
1013
    std::fprintf(plot, "POINT_DATA %d\n", Nx * Ny * Nz);
×
1014

1015
    int64_t num_neg = 0;
×
1016
    int64_t num_samples = 0;
×
1017
    float min_flux = 0.0;
×
1018
    float max_flux = -1.0e20;
×
1019
    // Plot multigroup flux data
1020
    for (int g = 0; g < negroups_; g++) {
×
1021
      std::fprintf(plot, "SCALARS flux_group_%d float\n", g);
×
1022
      std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1023
      for (int i = 0; i < Nx * Ny * Nz; i++) {
×
1024
        int64_t fsr = voxel_indices[i];
×
1025
        int64_t source_element = fsr * negroups_ + g;
×
1026
        float flux = 0;
×
1027
        if (fsr >= 0) {
×
1028
          flux = evaluate_flux_at_point(voxel_positions[i], fsr, g);
×
1029
          if (flux < 0.0)
×
1030
            flux = FlatSourceDomain::evaluate_flux_at_point(
×
1031
              voxel_positions[i], fsr, g);
×
1032
        }
1033
        if (flux < 0.0) {
×
1034
          num_neg++;
×
1035
          if (flux < min_flux) {
×
1036
            min_flux = flux;
×
1037
          }
1038
        }
1039
        if (flux > max_flux)
×
1040
          max_flux = flux;
×
1041
        num_samples++;
×
1042
        flux = convert_to_big_endian<float>(flux);
×
1043
        std::fwrite(&flux, sizeof(float), 1, plot);
×
1044
      }
1045
    }
1046

1047
    // Slightly negative fluxes can be normal when sampling corners of linear
1048
    // source regions. However, very common and high magnitude negative fluxes
1049
    // may indicate numerical instability.
1050
    if (num_neg > 0) {
×
1051
      warning(fmt::format("{} plot samples ({:.4f}%) contained negative fluxes "
×
1052
                          "(minumum found = {:.2e} maximum_found = {:.2e})",
1053
        num_neg, (100.0 * num_neg) / num_samples, min_flux, max_flux));
×
1054
    }
1055

1056
    // Plot FSRs
1057
    std::fprintf(plot, "SCALARS FSRs float\n");
×
1058
    std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1059
    for (int fsr : voxel_indices) {
×
1060
      float value = future_prn(10, fsr);
×
1061
      value = convert_to_big_endian<float>(value);
×
1062
      std::fwrite(&value, sizeof(float), 1, plot);
×
1063
    }
1064

1065
    // Plot Materials
1066
    std::fprintf(plot, "SCALARS Materials int\n");
×
1067
    std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1068
    for (int fsr : voxel_indices) {
×
1069
      int mat = -1;
×
1070
      if (fsr >= 0)
×
1071
        mat = source_regions_.material(fsr);
×
1072
      mat = convert_to_big_endian<int>(mat);
×
1073
      std::fwrite(&mat, sizeof(int), 1, plot);
×
1074
    }
1075

1076
    // Plot fission source
1077
    if (settings::run_mode == RunMode::EIGENVALUE) {
×
1078
      std::fprintf(plot, "SCALARS total_fission_source float\n");
×
1079
      std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1080
      for (int i = 0; i < Nx * Ny * Nz; i++) {
×
1081
        int64_t fsr = voxel_indices[i];
×
1082
        float total_fission = 0.0;
×
1083
        if (fsr >= 0) {
×
1084
          int mat = source_regions_.material(fsr);
×
1085
          if (mat != MATERIAL_VOID) {
×
1086
            for (int g = 0; g < negroups_; g++) {
×
1087
              int64_t source_element = fsr * negroups_ + g;
×
1088
              float flux = evaluate_flux_at_point(voxel_positions[i], fsr, g);
×
1089
              double sigma_f = sigma_f_[mat * negroups_ + g] *
×
1090
                               source_regions_.density_mult(fsr);
×
1091
              total_fission += sigma_f * flux;
×
1092
            }
1093
          }
1094
        }
1095
        total_fission = convert_to_big_endian<float>(total_fission);
×
1096
        std::fwrite(&total_fission, sizeof(float), 1, plot);
×
1097
      }
1098
    } else {
1099
      std::fprintf(plot, "SCALARS external_source float\n");
×
1100
      std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1101
      for (int i = 0; i < Nx * Ny * Nz; i++) {
×
1102
        int64_t fsr = voxel_indices[i];
×
1103
        int mat = source_regions_.material(fsr);
×
1104
        float total_external = 0.0f;
×
1105
        if (fsr >= 0) {
×
1106
          for (int g = 0; g < negroups_; g++) {
×
1107
            // External sources are already divided by sigma_t, so we need to
1108
            // multiply it back to get the true external source.
1109
            double sigma_t = 1.0;
×
1110
            if (mat != MATERIAL_VOID) {
×
1111
              sigma_t = sigma_t_[mat * negroups_ + g] *
×
1112
                        source_regions_.density_mult(fsr);
×
1113
            }
1114
            total_external += source_regions_.external_source(fsr, g) * sigma_t;
×
1115
          }
1116
        }
1117
        total_external = convert_to_big_endian<float>(total_external);
×
1118
        std::fwrite(&total_external, sizeof(float), 1, plot);
×
1119
      }
1120
    }
1121

1122
    // Plot weight window data
1123
    if (variance_reduction::weight_windows.size() == 1) {
×
1124
      std::fprintf(plot, "SCALARS weight_window_lower float\n");
×
1125
      std::fprintf(plot, "LOOKUP_TABLE default\n");
×
1126
      for (int i = 0; i < Nx * Ny * Nz; i++) {
×
1127
        float weight = weight_windows[i];
×
1128
        if (weight == 0.0)
×
1129
          weight = min_weight;
×
1130
        weight = convert_to_big_endian<float>(weight);
×
1131
        std::fwrite(&weight, sizeof(float), 1, plot);
×
1132
      }
1133
    }
1134

1135
    std::fclose(plot);
×
1136
  }
×
1137
}
×
1138

1139
void FlatSourceDomain::apply_external_source_to_source_region(
3,337✔
1140
  int src_idx, SourceRegionHandle& srh)
1141
{
1142
  auto s = model::external_sources[src_idx].get();
3,337✔
1143
  auto is = dynamic_cast<IndependentSource*>(s);
3,337!
1144
  auto discrete = dynamic_cast<Discrete*>(is->energy());
3,337!
1145
  double strength_factor = is->strength();
3,337✔
1146
  const auto& discrete_energies = discrete->x();
3,337✔
1147
  const auto& discrete_probs = discrete->prob();
3,337✔
1148

1149
  srh.external_source_present() = 1;
3,337✔
1150

1151
  for (int i = 0; i < discrete_energies.size(); i++) {
6,770✔
1152
    int g = data::mg.get_group_index(discrete_energies[i]);
3,433✔
1153
    srh.external_source(g) += discrete_probs[i] * strength_factor;
3,433✔
1154
  }
1155
}
3,337✔
1156

1157
void FlatSourceDomain::apply_external_source_to_cell_instances(int32_t i_cell,
351✔
1158
  int src_idx, int target_material_id, const vector<int32_t>& instances)
1159
{
1160
  Cell& cell = *model::cells[i_cell];
351✔
1161

1162
  if (cell.type_ != Fill::MATERIAL)
351!
1163
    return;
×
1164

1165
  for (int j : instances) {
25,129✔
1166
    int cell_material_idx = cell.material(j);
24,778✔
1167
    int cell_material_id;
1168
    if (cell_material_idx == MATERIAL_VOID) {
24,778✔
1169
      cell_material_id = MATERIAL_VOID;
208✔
1170
    } else {
1171
      cell_material_id = model::materials[cell_material_idx]->id();
24,570✔
1172
    }
1173
    if (target_material_id == C_NONE ||
24,778✔
1174
        cell_material_id == target_material_id) {
1175
      int64_t source_region = source_region_offsets_[i_cell] + j;
2,418✔
1176
      external_volumetric_source_map_[source_region].push_back(src_idx);
2,418✔
1177
    }
1178
  }
1179
}
1180

1181
void FlatSourceDomain::apply_external_source_to_cell_and_children(
377✔
1182
  int32_t i_cell, int src_idx, int32_t target_material_id)
1183
{
1184
  Cell& cell = *model::cells[i_cell];
377✔
1185

1186
  if (cell.type_ == Fill::MATERIAL) {
377✔
1187
    vector<int> instances(cell.n_instances());
351✔
1188
    std::iota(instances.begin(), instances.end(), 0);
351✔
1189
    apply_external_source_to_cell_instances(
351✔
1190
      i_cell, src_idx, target_material_id, instances);
1191
  } else if (target_material_id == C_NONE) {
377!
1192
    std::unordered_map<int32_t, vector<int32_t>> cell_instance_list =
1193
      cell.get_contained_cells(0, nullptr);
×
1194
    for (const auto& pair : cell_instance_list) {
×
1195
      int32_t i_child_cell = pair.first;
×
1196
      apply_external_source_to_cell_instances(
×
1197
        i_child_cell, src_idx, target_material_id, pair.second);
×
1198
    }
1199
  }
×
1200
}
377✔
1201

1202
void FlatSourceDomain::count_external_source_regions()
2,044✔
1203
{
1204
  n_external_source_regions_ = 0;
2,044✔
1205
#pragma omp parallel for reduction(+ : n_external_source_regions_)
945✔
1206
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
1,404,079✔
1207
    if (source_regions_.external_source_present(sr)) {
1,402,980✔
1208
      n_external_source_regions_++;
157,250✔
1209
    }
1210
  }
1211
}
2,044✔
1212

1213
// Converts user-specified external sources into the random ray
// representation: point sources are registered in a key-based map (applied
// lazily when their source region is discovered), while volumetric sources
// constrained by material, cell, or universe domains are recorded against
// the corresponding source regions.
void FlatSourceDomain::convert_external_sources()
{
  // Loop over external sources
  for (int es = 0; es < model::external_sources.size(); es++) {

    // Extract source information
    Source* s = model::external_sources[es].get();
    IndependentSource* is = dynamic_cast<IndependentSource*>(s);
    Discrete* energy = dynamic_cast<Discrete*>(is->energy());
    const std::unordered_set<int32_t>& domain_ids = is->domain_ids();
    // NOTE(review): energy and strength_factor are currently unused in this
    // function; the casts presumably serve as early validation — verify.
    double strength_factor = is->strength();

    // If there is no domain constraint specified, then this must be a point
    // source. In this case, we need to find the source region that contains the
    // point source and apply or relate it to the external source.
    if (is->domain_ids().size() == 0) {

      // Extract the point source coordinate and find the base source region at
      // that point
      auto sp = dynamic_cast<SpatialPoint*>(is->space());
      GeometryState gs;
      gs.r() = sp->r();
      gs.r_last() = sp->r();
      gs.u() = {1.0, 0.0, 0.0};
      bool found = exhaustive_find_cell(gs);
      if (!found) {
        fatal_error(fmt::format("Could not find cell containing external "
                                "point source at {}",
          sp->r()));
      }
      SourceRegionKey key = lookup_source_region_key(gs);

      // With the source region and mesh bin known, we can use the
      // accompanying SourceRegionKey as a key into a map that stores the
      // corresponding external source index for the point source. Notably, we
      // do not actually apply the external source to any source regions here,
      // as if mesh subdivision is enabled, they haven't actually been
      // discovered & initilized yet. When discovered, they will read from the
      // external_source_map to determine if there are any external source
      // terms that should be applied.
      external_point_source_map_[key].push_back(es);

    } else {
      // If not a point source, then use the volumetric domain constraints to
      // determine which source regions to apply the external source to.
      if (is->domain_type() == Source::DomainType::MATERIAL) {
        // Material constraint: scan every cell for matching material fills
        for (int32_t material_id : domain_ids) {
          for (int i_cell = 0; i_cell < model::cells.size(); i_cell++) {
            apply_external_source_to_cell_and_children(i_cell, es, material_id);
          }
        }
      } else if (is->domain_type() == Source::DomainType::CELL) {
        // Cell constraint: apply to each named cell (and its children)
        for (int32_t cell_id : domain_ids) {
          int32_t i_cell = model::cell_map[cell_id];
          apply_external_source_to_cell_and_children(i_cell, es, C_NONE);
        }
      } else if (is->domain_type() == Source::DomainType::UNIVERSE) {
        // Universe constraint: apply to every cell in each named universe
        for (int32_t universe_id : domain_ids) {
          int32_t i_universe = model::universe_map[universe_id];
          Universe& universe = *model::universes[i_universe];
          for (int32_t i_cell : universe.cells_) {
            apply_external_source_to_cell_and_children(i_cell, es, C_NONE);
          }
        }
      }
    }
  } // End loop over external sources
}
1281

1282
// Swaps the "new" and "old" scalar flux arrays between power iterations by
// delegating to the source region container.
void FlatSourceDomain::flux_swap()
{
  source_regions_.flux_swap();
}
1286

1287
// Flattens the multigroup cross section (MGXS) data for all materials into
// contiguous 1D arrays (sigma_t_, nu_sigma_f_, sigma_f_, chi_, sigma_s_, and
// -- for kinetic simulations -- chi_p_, inverse_vbar_, nu_p_sigma_f_,
// lambda_, nu_d_sigma_f_, chi_d_) indexed by material and energy group.
// Materials not present in the model get zero entries so that indexing by
// material * negroups_ remains valid for every material.
void FlatSourceDomain::flatten_xs()
{
  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single angle data.
  const int t = 0;
  const int a = 0;

  n_materials_ = data::mg.macro_xs_.size();
  for (int i = 0; i < n_materials_; i++) {
    auto& m = data::mg.macro_xs_[i];
    for (int g_out = 0; g_out < negroups_; g_out++) {
      if (m.exists_in_model) {
        double sigma_t =
          m.get_xs(MgxsType::TOTAL, g_out, NULL, NULL, NULL, t, a);
        sigma_t_.push_back(sigma_t);

        // Warn about (near-)zero total XS; such materials are later treated
        // as void to avoid dividing by Sigma_t.
        if (sigma_t < MINIMUM_MACRO_XS) {
          Material* mat = model::materials[i].get();
          warning(fmt::format(
            "Material \"{}\" (id: {}) has a group {} total cross section "
            "({:.3e}) below the minimum threshold "
            "({:.3e}). Material will be treated as pure void.",
            mat->name(), mat->id(), g_out, sigma_t, MINIMUM_MACRO_XS));
        }

        double nu_sigma_f =
          m.get_xs(MgxsType::NU_FISSION, g_out, NULL, NULL, NULL, t, a);
        nu_sigma_f_.push_back(nu_sigma_f);

        double sigma_f =
          m.get_xs(MgxsType::FISSION, g_out, NULL, NULL, NULL, t, a);
        sigma_f_.push_back(sigma_f);

        double chi = m.get_xs(MgxsType::CHI, g_out, &g_out, NULL, NULL, t, a);
        if (!std::isfinite(chi)) {
          // MGXS interface may return NaN in some cases, such as when material
          // is fissionable but has very small sigma_f.
          chi = 0.0;
        }
        chi_.push_back(chi);

        for (int g_in = 0; g_in < negroups_; g_in++) {
          double sigma_s =
            m.get_xs(MgxsType::NU_SCATTER, g_in, &g_out, NULL, NULL, t, a);
          sigma_s_.push_back(sigma_s);
          // For transport corrected XS data, diagonal elements may be negative.
          // In this case, set a flag to enable transport stabilization for the
          // simulation.
          if (g_out == g_in && sigma_s < 0.0)
            is_transport_stabilization_needed_ = true;
        }
        // Prompt cross-sections for kinetic simulations
        if (settings::kinetic_simulation) {
          double chi_p =
            m.get_xs(MgxsType::CHI_PROMPT, g_out, &g_out, NULL, NULL, t, a);
          if (!std::isfinite(chi_p)) {
            // MGXS interface may return NaN in some cases, such as when
            // material is fissionable but has very small sigma_f.
            chi_p = 0.0;
          }
          chi_p_.push_back(chi_p);

          double inverse_vbar =
            m.get_xs(MgxsType::INVERSE_VELOCITY, g_out, NULL, NULL, NULL, t, a);
          inverse_vbar_.push_back(inverse_vbar);

          double nu_p_Sigma_f = m.get_xs(
            MgxsType::PROMPT_NU_FISSION, g_out, NULL, NULL, NULL, t, a);
          nu_p_sigma_f_.push_back(nu_p_Sigma_f);
        }
      } else {
        // Material not used in the model: pad all arrays with zeros so that
        // material-indexed offsets stay consistent.
        sigma_t_.push_back(0);
        nu_sigma_f_.push_back(0);
        sigma_f_.push_back(0);
        chi_.push_back(0);
        for (int g_in = 0; g_in < negroups_; g_in++) {
          sigma_s_.push_back(0);
        }
        if (settings::kinetic_simulation) {
          chi_p_.push_back(0);
          inverse_vbar_.push_back(0);
          nu_p_sigma_f_.push_back(0);
        }
      }
    }
    // Delayed cross sections for time-dependent simulations
    if (settings::kinetic_simulation) {
      for (int dg = 0; dg < ndgroups_; dg++) {
        if (m.exists_in_model) {
          double lambda =
            m.get_xs(MgxsType::DECAY_RATE, 0, NULL, NULL, &dg, t, a);
          lambda_.push_back(lambda);
          for (int g_out = 0; g_out < negroups_; g_out++) {
            double nu_d_Sigma_f = m.get_xs(
              MgxsType::DELAYED_NU_FISSION, g_out, NULL, NULL, &dg, t, a);
            nu_d_sigma_f_.push_back(nu_d_Sigma_f);
            double chi_d =
              m.get_xs(MgxsType::CHI_DELAYED, g_out, &g_out, NULL, &dg, t, a);
            if (!std::isfinite(chi_d)) {
              // MGXS interface may return NaN in some cases, such as when
              // material is fissionable but has very small sigma_f.
              chi_d = 0.0;
            }
            chi_d_.push_back(chi_d);
          }
        } else {
          lambda_.push_back(0);
          for (int g_out = 0; g_out < negroups_; g_out++) {
            nu_d_sigma_f_.push_back(0);
            chi_d_.push_back(0);
          }
        }
      }
    }
  }
}
1404

1405
// Converts the forward-flux solution into adjoint external sources used for
// weight window generation: source = 1/forward_flux, with filters for
// near-zero fluxes and poorly-sampled ("small") source regions, and a final
// pre-division by sigma_t. Also zeroes scalar_flux_final for reuse by the
// adjoint solve.
void FlatSourceDomain::set_adjoint_sources()
{
  // Set the adjoint external source to 1/forward_flux. If the forward flux is
  // negative, zero, or extremely close to zero, set the adjoint source to zero,
  // as this is likely a very small source region that we don't need to bother
  // trying to vector particles towards. In the case of flux "being extremely
  // close to zero", we define this as being a fixed fraction of the maximum
  // forward flux, below which we assume the flux would be physically
  // undetectable.

  // First, find the maximum forward flux value
  double max_flux = 0.0;
#pragma omp parallel for reduction(max : max_flux)
  for (int64_t se = 0; se < n_source_elements(); se++) {
    double flux = source_regions_.scalar_flux_final(se);
    if (flux > max_flux) {
      max_flux = flux;
    }
  }

  // Then, compute the adjoint source for each source region
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    for (int g = 0; g < negroups_; g++) {
      double flux = source_regions_.scalar_flux_final(sr, g);
      if (flux <= ZERO_FLUX_CUTOFF * max_flux) {
        source_regions_.external_source(sr, g) = 0.0;
      } else {
        source_regions_.external_source(sr, g) = 1.0 / flux;
      }
      if (flux > 0.0) {
        source_regions_.external_source_present(sr) = 1;
      }
      // Reset the final flux accumulator so the adjoint solve starts clean.
      source_regions_.scalar_flux_final(sr, g) = 0.0;
    }
  }

  // "Small" source regions in OpenMC are defined as those that are hit by
  // MIN_HITS_PER_BATCH rays or fewer each batch. These regions typically have
  // very small volumes combined with a low aspect ratio, and are often
  // generated when applying a source region mesh that clips the edge of a
  // curved surface. As perhaps only a few rays will visit these regions over
  // the entire forward simulation, the forward flux estimates are extremely
  // noisy and unreliable. In some cases, the noise may make the forward fluxes
  // extremely low, leading to unphysically large adjoint source terms,
  // resulting in weight windows that aggressively try to drive particles
  // towards these regions. To fix this, we simply filter out any "small" source
  // regions from consideration. If a source region is "small", we
  // set its adjoint source to zero. This adds negligible bias to the adjoint
  // flux solution, as the true total adjoint source contribution from small
  // regions is likely to be negligible.
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    if (source_regions_.is_small(sr)) {
      for (int g = 0; g < negroups_; g++) {
        source_regions_.external_source(sr, g) = 0.0;
      }
      source_regions_.external_source_present(sr) = 0;
    }
  }
  // Divide the fixed source term by sigma t (to save time when applying each
  // iteration)
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    int material = source_regions_.material(sr);
    double density_mult = source_regions_.density_mult(sr);
    // Void regions have no sigma_t to divide by; their source stays as-is.
    if (material == MATERIAL_VOID) {
      continue;
    }
    for (int g = 0; g < negroups_; g++) {
      double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
      source_regions_.external_source(sr, g) /= sigma_t;
    }
  }
}
1480

1481
void FlatSourceDomain::transpose_scattering_matrix()
66✔
1482
{
1483
  // Transpose the inner two dimensions for each material
1484
  for (int m = 0; m < n_materials_; ++m) {
249✔
1485
    int material_offset = m * negroups_ * negroups_;
183✔
1486
    for (int i = 0; i < negroups_; ++i) {
523✔
1487
      for (int j = i + 1; j < negroups_; ++j) {
887✔
1488
        // Calculate indices of the elements to swap
1489
        int idx1 = material_offset + i * negroups_ + j;
547✔
1490
        int idx2 = material_offset + j * negroups_ + i;
547✔
1491

1492
        // Swap the elements to transpose the matrix
1493
        std::swap(sigma_s_[idx1], sigma_s_[idx2]);
547✔
1494
      }
1495
    }
1496
  }
1497
}
66✔
1498

1499
// Copies the accumulated final scalar fluxes into a caller-provided flat
// buffer, resizing it to hold one entry per source element
// (n_source_regions() * negroups_).
void FlatSourceDomain::serialize_final_fluxes(vector<double>& flux)
{
  // Size the output to one slot per (source region, group) pair.
  flux.resize(n_source_regions() * negroups_);

// Copy every source element's final flux in parallel.
#pragma omp parallel for
  for (int64_t idx = 0; idx < n_source_elements(); idx++) {
    flux[idx] = source_regions_.scalar_flux_final(idx);
  }
}
1509

NEW
1510
// Copies the accumulated final source terms into a caller-provided flat
// buffer, resizing it to hold one entry per source element
// (n_source_regions() * negroups_).
void FlatSourceDomain::serialize_final_sources(vector<double>& source)
{
  // Size the output to one slot per (source region, group) pair.
  source.resize(n_source_regions() * negroups_);

// Copy every source element's final source value in parallel.
#pragma omp parallel for
  for (int64_t idx = 0; idx < n_source_elements(); idx++) {
    source[idx] = source_regions_.source_final(idx);
  }
}
1520

1521
// Records, in mesh_map_, the subdivision mesh index for each listed instance
// of a material-filled cell, subject to material matching rules:
//   - target_material_id == C_NONE && !is_target_void: match all materials
//   - is_target_void (with target C_NONE): match only void (C_NONE) fills
//   - otherwise: match only instances filled with target_material_id
// It is a fatal error for a source region to already have a different mesh
// assigned, as only one subdivision mesh may apply per source region.
void FlatSourceDomain::apply_mesh_to_cell_instances(int32_t i_cell,
  int32_t mesh_idx, int target_material_id, const vector<int32_t>& instances,
  bool is_target_void)
{
  Cell& cell = *model::cells[i_cell];
  // Only material-filled cells correspond to source regions.
  if (cell.type_ != Fill::MATERIAL)
    return;
  for (int32_t j : instances) {
    int cell_material_idx = cell.material(j);
    // Translate material index to user-facing id (C_NONE indicates void).
    int cell_material_id = (cell_material_idx == C_NONE)
                             ? C_NONE
                             : model::materials[cell_material_idx]->id();

    // Wildcard match (no target material, not targeting void), or exact
    // material-id match. Note: when is_target_void is true the second clause
    // matches void fills, since both sides are then C_NONE.
    if ((target_material_id == C_NONE && !is_target_void) ||
        cell_material_id == target_material_id) {
      int64_t sr = source_region_offsets_[i_cell] + j;
      // Check if the key is already present in the mesh_map_
      if (mesh_map_.find(sr) != mesh_map_.end()) {
        fatal_error(fmt::format("Source region {} already has mesh idx {} "
                                "applied, but trying to apply mesh idx {}",
          sr, mesh_map_[sr], mesh_idx));
      }
      // If the SR has not already been assigned, then we can write to it
      mesh_map_[sr] = mesh_idx;
    }
  }
}
1548

1549
// Applies a subdivision mesh to a cell and, when the cell is a fill (universe
// or lattice) and no specific material is targeted, recursively to every
// material cell instance contained within it.
void FlatSourceDomain::apply_mesh_to_cell_and_children(int32_t i_cell,
  int32_t mesh_idx, int32_t target_material_id, bool is_target_void)
{
  Cell& cell = *model::cells[i_cell];

  if (cell.type_ == Fill::MATERIAL) {
    // Leaf cell: apply the mesh to every instance of this cell.
    vector<int> instances(cell.n_instances());
    std::iota(instances.begin(), instances.end(), 0);
    apply_mesh_to_cell_instances(
      i_cell, mesh_idx, target_material_id, instances, is_target_void);
  } else if (target_material_id == C_NONE && !is_target_void) {
    // Filled cell with no material constraint: descend into all contained
    // material cell instances, per parent instance.
    for (int j = 0; j < cell.n_instances(); j++) {
      std::unordered_map<int32_t, vector<int32_t>> cell_instance_list =
        cell.get_contained_cells(j, nullptr);
      for (const auto& pair : cell_instance_list) {
        int32_t i_child_cell = pair.first;
        apply_mesh_to_cell_instances(i_child_cell, mesh_idx, target_material_id,
          pair.second, is_target_void);
      }
    }
  }
}
1571

1572
// Walks all meshes registered for source region subdivision and applies each
// to the geometry domains (materials, cells, or universes) it was associated
// with via mesh_domain_map_.
void FlatSourceDomain::apply_meshes()
{
  // Skip if there are no mappings between mesh IDs and domains
  if (mesh_domain_map_.empty())
    return;

  // Loop over meshes
  for (int mesh_idx = 0; mesh_idx < model::meshes.size(); mesh_idx++) {
    Mesh* mesh = model::meshes[mesh_idx].get();
    int mesh_id = mesh->id();

    // Skip if mesh id is not present in the map
    if (mesh_domain_map_.find(mesh_id) == mesh_domain_map_.end())
      continue;

    // Loop over domains associated with the mesh
    for (auto& domain : mesh_domain_map_[mesh_id]) {
      Source::DomainType domain_type = domain.first;
      int domain_id = domain.second;

      if (domain_type == Source::DomainType::MATERIAL) {
        // Material domains must be searched for across all cells. A domain id
        // of C_NONE indicates the void "material".
        for (int i_cell = 0; i_cell < model::cells.size(); i_cell++) {
          if (domain_id == C_NONE) {
            apply_mesh_to_cell_and_children(i_cell, mesh_idx, domain_id, true);
          } else {
            apply_mesh_to_cell_and_children(i_cell, mesh_idx, domain_id, false);
          }
        }
      } else if (domain_type == Source::DomainType::CELL) {
        int32_t i_cell = model::cell_map[domain_id];
        apply_mesh_to_cell_and_children(i_cell, mesh_idx, C_NONE, false);
      } else if (domain_type == Source::DomainType::UNIVERSE) {
        // Apply to every cell in the universe (and their children).
        int32_t i_universe = model::universe_map[domain_id];
        Universe& universe = *model::universes[i_universe];
        for (int32_t i_cell : universe.cells_) {
          apply_mesh_to_cell_and_children(i_cell, mesh_idx, C_NONE, false);
        }
      }
    }
  }
}
1613

1614
// Resolves a (base source region, mesh bin) key to a handle on its source
// region storage, creating and initializing the source region on first
// discovery. Thread safe: creation is serialized via per-key locks on the
// temporary discovered_source_regions_ map. Returns a handle flagged as a
// numerical floating-point artifact when the key fails geometric sanity
// checks (see Case 3 below).
SourceRegionHandle FlatSourceDomain::get_subdivided_source_region_handle(
  SourceRegionKey sr_key, Position r, Direction u)
{
  // Case 1: Check if the source region key is already present in the permanent
  // map. This is the most common condition, as any source region visited in a
  // previous power iteration will already be present in the permanent map. If
  // the source region key is found, we translate the key into a specific 1D
  // source region index and return a handle its position in the
  // source_regions_ vector.
  auto it = source_region_map_.find(sr_key);
  if (it != source_region_map_.end()) {
    int64_t sr = it->second;
    return source_regions_.get_source_region_handle(sr);
  }

  // Case 2: Check if the source region key is present in the temporary (thread
  // safe) map. This is a common occurrence in the first power iteration when
  // the source region has already been visited already by some other ray. We
  // begin by locking the temporary map before any operations are performed. The
  // lock is not global over the full data structure -- it will be dependent on
  // which key is used.
  discovered_source_regions_.lock(sr_key);

  // If the key is found in the temporary map, then we return a handle to the
  // source region that is stored in the temporary map.
  if (discovered_source_regions_.contains(sr_key)) {
    SourceRegionHandle handle {discovered_source_regions_[sr_key]};
    discovered_source_regions_.unlock(sr_key);
    return handle;
  }

  // Case 3: The source region key is not present anywhere, but it is only due
  // to floating point artifacts. These artifacts occur when the overlaid mesh
  // overlaps with actual geometry surfaces. In these cases, roundoff error may
  // result in the ray tracer detecting an additional (very short) segment
  // though a mesh bin that is actually past the physical source region
  // boundary. This is a result of the the multi-level ray tracing treatment in
  // OpenMC, which depending on the number of universes in the hierarchy etc can
  // result in the wrong surface being selected as the nearest. This can happen
  // in a lattice when there are two directions that both are very close in
  // distance, within the tolerance of FP_REL_PRECISION, and the are thus
  // treated as being equivalent so alternative logic is used. However, when we
  // go and ray trace on this with the mesh tracer we may go past the surface
  // bounding the current source region.
  //
  // To filter out this case, before we create the new source region, we double
  // check that the actual starting point of this segment (r) is still in the
  // same geometry source region that we started in. If an artifact is detected,
  // we discard the segment (and attenuation through it) as it is not really a
  // valid source region and will have only an infinitessimally small cell
  // combined with the mesh bin. Thankfully, this is a fairly rare condition,
  // and only triggers for very short ray lengths. It can be fixed by decreasing
  // the value of FP_REL_PRECISION in constants.h, but this may have unknown
  // consequences for the general ray tracer, so for now we do the below sanity
  // checks before generating phantom source regions. A significant extra cost
  // is incurred in instantiating the GeometryState object and doing a cell
  // lookup, but again, this is going to be an extremely rare thing to check
  // after the first power iteration has completed.

  // Sanity check on source region id. The point is nudged slightly along the
  // ray direction u before the cell lookup; the lookup direction itself is
  // arbitrary.
  GeometryState gs;
  gs.r() = r + TINY_BIT * u;
  gs.u() = {1.0, 0.0, 0.0};
  exhaustive_find_cell(gs);
  int64_t sr_found = lookup_base_source_region_idx(gs);
  if (sr_found != sr_key.base_source_region_id) {
    discovered_source_regions_.unlock(sr_key);
    SourceRegionHandle handle;
    handle.is_numerical_fp_artifact_ = true;
    return handle;
  }

  // Sanity check on mesh bin
  int mesh_idx = lookup_mesh_idx(sr_key.base_source_region_id);
  if (mesh_idx == C_NONE) {
    // No subdivision mesh: the only valid bin is 0.
    if (sr_key.mesh_bin != 0) {
      discovered_source_regions_.unlock(sr_key);
      SourceRegionHandle handle;
      handle.is_numerical_fp_artifact_ = true;
      return handle;
    }
  } else {
    Mesh* mesh = model::meshes[mesh_idx].get();
    int bin_found = mesh->get_bin(r + TINY_BIT * u);
    if (bin_found != sr_key.mesh_bin) {
      discovered_source_regions_.unlock(sr_key);
      SourceRegionHandle handle;
      handle.is_numerical_fp_artifact_ = true;
      return handle;
    }
  }

  // Case 4: The source region key is valid, but is not present anywhere. This
  // condition only occurs the first time the source region is discovered
  // (typically in the first power iteration). In this case, we need to handle
  // creation of the new source region and its storage into the parallel map.
  // Additionally, we need to determine the source region's material, initialize
  // the starting scalar flux guess, and apply any known external sources.

  // Call the basic constructor for the source region and store in the parallel
  // map.
  bool is_linear = RandomRay::source_shape_ != RandomRaySourceShape::FLAT;
  SourceRegion* sr_ptr = discovered_source_regions_.emplace(
    sr_key, {negroups_, ndgroups_, is_linear});
  SourceRegionHandle handle {*sr_ptr};

  // Determine the material
  int gs_i_cell = gs.lowest_coord().cell();
  Cell& cell = *model::cells[gs_i_cell];
  int material = cell.material(gs.cell_instance());

  // If material total XS is extremely low, just set it to void to avoid
  // problems with 1/Sigma_t
  for (int g = 0; g < negroups_; g++) {
    double sigma_t = sigma_t_[material * negroups_ + g];
    if (sigma_t < MINIMUM_MACRO_XS) {
      material = MATERIAL_VOID;
      break;
    }
  }

  // Kinetic mode divides by sigma_t in its time-derivative terms, so void
  // regions cannot be supported yet.
  if (settings::kinetic_simulation && material == MATERIAL_VOID) {
    fatal_error("Explicit void treatment for kinetic simulations "
                " is not currently supported.");
  }

  handle.material() = material;

  handle.density_mult() = cell.density_mult(gs.cell_instance());

  // Store the mesh index (if any) assigned to this source region
  handle.mesh() = mesh_idx;

  if (settings::run_mode == RunMode::FIXED_SOURCE) {
    // Determine if there are any volumetric sources, and apply them.
    // Volumetric sources are specifc only to the base SR idx.
    auto it_vol =
      external_volumetric_source_map_.find(sr_key.base_source_region_id);
    if (it_vol != external_volumetric_source_map_.end()) {
      const vector<int>& vol_sources = it_vol->second;
      for (int src_idx : vol_sources) {
        apply_external_source_to_source_region(src_idx, handle);
      }
    }

    // Determine if there are any point sources, and apply them.
    // Point sources are specific to the source region key.
    auto it_point = external_point_source_map_.find(sr_key);
    if (it_point != external_point_source_map_.end()) {
      const vector<int>& point_sources = it_point->second;
      for (int src_idx : point_sources) {
        apply_external_source_to_source_region(src_idx, handle);
      }
    }

    // Divide external source term by sigma_t (skipped for void/invalid
    // material to avoid dividing by zero).
    if (material != C_NONE) {
      for (int g = 0; g < negroups_; g++) {
        double sigma_t =
          sigma_t_[material * negroups_ + g] * handle.density_mult();
        handle.external_source(g) /= sigma_t;
      }
    }
  }

  // Compute the combined source term
  update_single_neutron_source(handle);
  if (settings::kinetic_simulation && !simulation::is_initial_condition) {
    if (RandomRay::time_method_ == RandomRayTimeMethod::ISOTROPIC)
      compute_single_phi_prime(handle);
    else if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION)
      compute_single_T1(handle);
  }

  // Unlock the parallel map. Note: we may be tempted to release
  // this lock earlier, and then just use the source region's lock to protect
  // the flux/source initialization stages above. However, the rest of the code
  // only protects updates to the new flux and volume fields, and assumes that
  // the source is constant for the duration of transport. Thus, using just the
  // source region's lock by itself would result in other threads potentially
  // reading from the source before it is computed, as they won't use the lock
  // when only reading from the SR's source. It would be expensive to protect
  // those operations, whereas generating the SR is only done once, so we just
  // hold the map's bucket lock until the source region is fully initialized.
  discovered_source_regions_.unlock(sr_key);

  return handle;
}
1802

1803
// Migrates source regions discovered during this iteration (held in the
// thread-safe temporary map) into permanent storage, in a deterministic
// sorted-key order, and registers the new regions with the tally system.
// Regions that were never actually traversed (zero volume) are discarded.
void FlatSourceDomain::finalize_discovered_source_regions()
{
  // Extract keys for entries with a valid volume.
  vector<SourceRegionKey> keys;
  for (const auto& pair : discovered_source_regions_) {
    if (pair.second.volume_ > 0.0) {
      keys.push_back(pair.first);
    }
  }

  if (!keys.empty()) {
    // Sort the keys, so as to ensure reproducible ordering given that source
    // regions may have been added to discovered_source_regions_ in an arbitrary
    // order due to shared memory threading.
    std::sort(keys.begin(), keys.end());

    // Remember the index of the first new source region
    int64_t start_sr_id = source_regions_.n_source_regions();

    // Append the source regions in the sorted key order.
    for (const auto& key : keys) {
      const SourceRegion& sr = discovered_source_regions_[key];
      source_region_map_[key] = source_regions_.n_source_regions();
      source_regions_.push_back(sr);
    }

    // Map all new source regions to tallies
    convert_source_regions_to_tallies(start_sr_id);
  }

  // Reset the temporary map for the next iteration.
  discovered_source_regions_.clear();
}
1835

1836
// This is the "diagonal stabilization" technique developed by Gunow et al. in:
1837
//
1838
// Geoffrey Gunow, Benoit Forget, Kord Smith, Stabilization of multi-group
1839
// neutron transport with transport-corrected cross-sections, Annals of Nuclear
1840
// Energy, Volume 126, 2019, Pages 211-219, ISSN 0306-4549,
1841
// https://doi.org/10.1016/j.anucene.2018.10.036.
1842
// Applies diagonal transport stabilization to the newly-computed scalar
// fluxes for any group whose in-group scattering cross section is negative
// (possible with transport-corrected XS data). No-op when flatten_xs() found
// no negative diagonal elements.
void FlatSourceDomain::apply_transport_stabilization()
{
  // Don't do anything if all in-group scattering
  // cross sections are positive
  if (!is_transport_stabilization_needed_) {
    return;
  }

  // Apply the stabilization factor to all source elements
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
    int material = source_regions_.material(sr);
    double density_mult = source_regions_.density_mult(sr);
    // Void regions have no scattering data; skip them.
    if (material == MATERIAL_VOID) {
      continue;
    }
    for (int g = 0; g < negroups_; g++) {
      // Only apply stabilization if the diagonal (in-group) scattering XS is
      // negative
      double sigma_s =
        sigma_s_[material * negroups_ * negroups_ + g * negroups_ + g] *
        density_mult;
      if (sigma_s < 0.0) {
        double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
        double phi_new = source_regions_.scalar_flux_new(sr, g);
        double phi_old = source_regions_.scalar_flux_old(sr, g);

        // Equation 18 in the above Gunow et al. 2019 paper. For a default
        // rho of 1.0, this ensures there are no negative diagonal elements
        // in the iteration matrix. A lesser rho could be used (or exposed
        // as a user input parameter) to reduce the negative impact on
        // convergence rate though would need to be experimentally tested to see
        // if it doesn't become unstable. rho = 1.0 is good as it gives the
        // highest assurance of stability, and the impacts on convergence rate
        // are pretty mild.
        double D = diagonal_stabilization_rho_ * sigma_s / sigma_t;

        // Equation 16 in the above Gunow et al. 2019 paper
        source_regions_.scalar_flux_new(sr, g) =
          (phi_new - D * phi_old) / (1.0 - D);
      }
    }
  }
}
1886

1887
// Determines the base source region index (i.e., a material filled cell
1888
// instance) that corresponds to a particular location in the geometry. Requires
1889
// that the "gs" object passed in has already been initialized and has called
1890
// find_cell etc.
1891
int64_t FlatSourceDomain::lookup_base_source_region_idx(
2,147,483,647✔
1892
  const GeometryState& gs) const
1893
{
1894
  int i_cell = gs.lowest_coord().cell();
2,147,483,647✔
1895
  int64_t sr = source_region_offsets_[i_cell] + gs.cell_instance();
2,147,483,647✔
1896
  return sr;
2,147,483,647✔
1897
}
1898

1899
// Determines the index of the mesh (if any) that has been applied
1900
// to a particular base source region index.
1901
int FlatSourceDomain::lookup_mesh_idx(int64_t sr) const
2,147,483,647✔
1902
{
1903
  int mesh_idx = C_NONE;
2,147,483,647✔
1904
  auto mesh_it = mesh_map_.find(sr);
2,147,483,647✔
1905
  if (mesh_it != mesh_map_.end()) {
2,147,483,647✔
1906
    mesh_idx = mesh_it->second;
2,147,483,647✔
1907
  }
1908
  return mesh_idx;
2,147,483,647✔
1909
}
1910

1911
// Determines the source region key that corresponds to a particular location in
1912
// the geometry. This takes into account both the base source region index as
1913
// well as the mesh bin if a mesh is applied to this source region for
1914
// subdivision.
1915
SourceRegionKey FlatSourceDomain::lookup_source_region_key(
  const GeometryState& gs) const
{
  // Combine the base source region index with the subdivision mesh bin
  // (0 when no mesh applies) to form the full key.
  const int64_t base_sr = lookup_base_source_region_idx(gs);
  return SourceRegionKey {base_sr, lookup_mesh_bin(base_sr, gs.r())};
}
1922

1923
// Determines the mesh bin that corresponds to a particular base source region
1924
// index and position.
1925
int64_t FlatSourceDomain::lookup_mesh_bin(int64_t sr, Position r) const
10,925,534✔
1926
{
1927
  int mesh_idx = lookup_mesh_idx(sr);
10,925,534✔
1928
  int mesh_bin = 0;
10,925,534✔
1929
  if (mesh_idx != C_NONE) {
10,925,534✔
1930
    mesh_bin = model::meshes[mesh_idx]->get_bin(r);
5,905,534✔
1931
  }
1932
  return mesh_bin;
10,925,534✔
1933
}
1934

1935
//------------------------------------------------------------------------------
1936
// Methods for kinetic simulations
1937

1938
// Generates new estimate of k_dynamic based on the fraction between this
1939
// timestep's estimate of neutron production and loss. (previous timestep
1940
// fission vs current timestep fission?)
1941
// TODO: implement compute_k_dynamic
1942

1943
// Compute new estimate of scattering + fission (+ precursor decay for
1944
// kinetic simulations) sources in each source region based on the flux
1945
// estimate from the previous iteration.
1946

1947
// Computes phi_prime for each energy group of a single source region:
// a backward-difference estimate of the scalar flux time derivative
// (A0 * phi_old + BD history terms), scaled by 1/vbar and pre-divided by
// sigma_t so it can be applied cheaply during transport.
// Fix: removed a stray empty statement (a lone ';') left after the sigma_t
// computation.
void FlatSourceDomain::compute_single_phi_prime(SourceRegionHandle& srh)
{
  // Leading backward-difference coefficient, scaled by the timestep size
  double A0 =
    (bd_coefficients_first_order_.at(RandomRay::bd_order_))[0] / settings::dt;
  int material = srh.material();
  double density_mult = srh.density_mult();
  for (int g = 0; g < negroups_; g++) {
    double inverse_vbar = inverse_vbar_[material * negroups_ + g];
    // TODO: add support for explicit void
    double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;

    // d(phi)/dt ~= A0 * phi_old + (lower-order backward-difference terms)
    double scalar_flux_time_derivative =
      A0 * srh.scalar_flux_old(g) + srh.scalar_flux_rhs_bd(g);
    srh.phi_prime(g) = scalar_flux_time_derivative * inverse_vbar / sigma_t;
  }
}
1964

1965
// T1 calculation
1966
void FlatSourceDomain::compute_single_T1(SourceRegionHandle& srh)
208,000✔
1967
{
1968
  double A0 =
1969
    (bd_coefficients_first_order_.at(RandomRay::bd_order_))[0] / settings::dt;
208,000✔
1970
  double B0 = (bd_coefficients_second_order_.at(RandomRay::bd_order_))[0] /
208,000✔
1971
              (settings::dt * settings::dt);
208,000✔
1972
  int material = srh.material();
208,000✔
1973
  double density_mult = srh.density_mult();
208,000✔
1974
  for (int g = 0; g < negroups_; g++) {
6,281,600✔
1975
    double inverse_vbar = inverse_vbar_[material * negroups_ + g];
6,073,600✔
1976
    // TODO: add support for explicit void
1977
    double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
6,073,600✔
1978

1979
    // Multiply out sigma_t to correctly compute the derivative term
1980
    float source_time_derivative =
1981
      A0 * srh.source(g) * sigma_t + srh.source_rhs_bd(g);
6,073,600✔
1982

1983
    double scalar_flux_time_derivative_2 =
1984
      B0 * srh.scalar_flux_old(g) + srh.scalar_flux_rhs_bd_2(g);
6,073,600✔
1985
    scalar_flux_time_derivative_2 *= inverse_vbar;
6,073,600✔
1986

1987
    // Divide by sigma_t to save time during transport
1988
    srh.T1(g) =
6,073,600✔
1989
      (source_time_derivative - scalar_flux_time_derivative_2) / sigma_t;
6,073,600✔
1990
  }
1991
}
208,000✔
1992

1993
void FlatSourceDomain::compute_single_delayed_fission_source(
121,721,568✔
1994
  SourceRegionHandle& srh)
1995
{
1996

1997
  // Reset all delayed fission sources to zero (important for void regions)
1998
  for (int dg = 0; dg < ndgroups_; dg++) {
1,094,329,312✔
1999
    srh.delayed_fission_source(dg) = 0.0;
972,607,744✔
2000
  }
2001

2002
  int material = srh.material();
121,721,568✔
2003
  double density_mult = srh.density_mult();
121,721,568✔
2004
  if (material != MATERIAL_VOID) {
121,721,568!
2005
    double inverse_k_eff = 1.0 / k_eff_;
121,721,568✔
2006
    for (int dg = 0; dg < ndgroups_; dg++) {
1,094,329,312✔
2007
      // We cannot have delayed neutrons if there is no delayed data
2008
      double lambda = lambda_[material * ndgroups_ + dg];
972,607,744✔
2009
      if (lambda != 0.0) {
972,607,744✔
2010
        for (int g = 0; g < negroups_; g++) {
2,147,483,647✔
2011
          double scalar_flux = scalar_flux = srh.scalar_flux_new(g);
2,147,483,647✔
2012
          double nu_d_sigma_f = nu_d_sigma_f_[material * negroups_ * ndgroups_ +
2,147,483,647✔
2013
                                              dg * negroups_ + g] *
2,147,483,647✔
2014
                                density_mult;
2,147,483,647✔
2015
          srh.delayed_fission_source(dg) += nu_d_sigma_f * scalar_flux;
2,147,483,647✔
2016
        }
2017
        srh.delayed_fission_source(dg) *= inverse_k_eff;
448,358,272✔
2018
      }
2019
    }
2020
  }
2021
}
121,721,568✔
2022

2023
// Computes end-of-timestep precursor concentrations for a single source
// region. During the steady-state initial condition the precursors are set to
// their equilibrium value (production / decay); otherwise they are advanced
// with an implicit backward-difference update:
//   (A0 + lambda) * C_new = S_delayed - rhs_bd
// Fix: A0 is loop-invariant and is now computed once instead of once per
// precursor group.
void FlatSourceDomain::compute_single_precursors(SourceRegionHandle& srh)
{
  // Reset all precursors to zero (important for void regions)
  for (int dg = 0; dg < ndgroups_; dg++) {
    srh.precursors_new(dg) = 0.0;
  }

  int material = srh.material();
  if (material == MATERIAL_VOID)
    return;

  // Leading backward-difference coefficient (hoisted out of the loop)
  double A0 =
    (bd_coefficients_first_order_.at(RandomRay::bd_order_))[0] / settings::dt;

  for (int dg = 0; dg < ndgroups_; dg++) {
    double lambda = lambda_[material * ndgroups_ + dg];
    // No decay constant means no delayed data for this group
    if (lambda == 0.0)
      continue;

    double delayed_fission_source = srh.delayed_fission_source(dg);
    if (simulation::is_initial_condition) {
      // Equilibrium: production balances decay
      srh.precursors_new(dg) = delayed_fission_source / lambda;
    } else {
      double precursor_rhs_bd = srh.precursors_rhs_bd(dg);
      srh.precursors_new(dg) = delayed_fission_source - precursor_rhs_bd;
      srh.precursors_new(dg) /= A0 + lambda;
    }
  }
}
2050

2051
void FlatSourceDomain::compute_all_precursors()
95,200✔
2052
{
2053
  simulation::time_compute_precursors.start();
95,200✔
2054

2055
#pragma omp parallel for
35,700✔
2056
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
76,135,480✔
2057
    SourceRegionHandle srh = source_regions_.get_source_region_handle(sr);
76,075,980✔
2058
    compute_single_delayed_fission_source(srh);
76,075,980✔
2059
    compute_single_precursors(srh);
76,075,980✔
2060
  }
2061

2062
  simulation::time_compute_precursors.start();
95,200✔
2063
}
95,200✔
2064

NEW
2065
// Flattens the accumulated final precursor concentrations into a contiguous
// vector for output.
void FlatSourceDomain::serialize_final_precursors(vector<double>& precursors)
{
  // Ensure the output array is correctly sized.
  // NOTE(review): the resize uses n_source_regions() * ndgroups_ while the
  // loop below runs over n_delay_elements() -- presumably these are equal,
  // but confirm; a mismatch would leave trailing entries unwritten.
  precursors.resize(n_source_regions() * ndgroups_);

// Serialize the precursors for output
#pragma omp parallel for
  for (int64_t elem = 0; elem < n_delay_elements(); elem++) {
    precursors[elem] = source_regions_.precursors_final(elem);
  }
}
2075

2076
void FlatSourceDomain::precursors_swap()
95,200✔
2077
{
2078
  source_regions_.precursors_swap();
95,200✔
2079
}
95,200✔
2080

2081
void FlatSourceDomain::accumulate_iteration_quantities()
50,726✔
2082
{
2083
  accumulate_iteration_flux();
50,726✔
2084
  if (settings::kinetic_simulation) {
50,726✔
2085
#pragma omp parallel for
17,640✔
2086
    for (int64_t sr = 0; sr < n_source_regions(); sr++) {
38,031,000✔
2087
      for (int g = 0; g < negroups_; g++) {
305,760,000✔
2088
        if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION) {
267,758,400✔
2089
          source_regions_.source_final(sr, g) += source_regions_.source(sr, g);
1,383,200✔
2090
        }
2091
      }
2092
      if (settings::create_delayed_neutrons) {
38,001,600!
2093
        for (int dg = 0; dg < ndgroups_; dg++) {
341,723,200✔
2094
          source_regions_.precursors_final(sr, dg) +=
303,721,600✔
2095
            source_regions_.precursors_new(sr, dg);
303,721,600✔
2096
        }
2097
      }
2098
    }
2099
  }
2100
}
50,726✔
2101

2102
void FlatSourceDomain::normalize_final_quantities()
1,705✔
2103
{
2104
  double normalization_factor =
1,705✔
2105
    1.0 / (settings::n_batches - settings::n_inactive);
1,705✔
2106
  double source_normalization_factor;
2107
  if (!settings::kinetic_simulation ||
1,705!
2108
      settings::kinetic_simulation &&
1,092✔
2109
        simulation::current_timestep == settings::n_timesteps)
1,092✔
2110
    source_normalization_factor =
769✔
2111
      compute_fixed_source_normalization_factor() * normalization_factor;
769✔
2112
  else
2113
    // The source normalization should only be applied to internal quantities at
2114
    // the end of time stepping in preparation for an adjoint solve.
2115
    source_normalization_factor = 1.0 * normalization_factor;
936✔
2116

2117
#pragma omp parallel for
788✔
2118
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
1,403,897✔
2119
    for (int g = 0; g < negroups_; g++) {
4,623,600✔
2120
      source_regions_.scalar_flux_final(sr, g) *= source_normalization_factor;
3,220,620✔
2121
      if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION) {
3,220,620✔
2122
        source_regions_.source_final(sr, g) *= source_normalization_factor;
276,640✔
2123
      }
2124
    }
2125
    if (settings::kinetic_simulation) {
1,402,980✔
2126
      for (int dg = 0; dg < ndgroups_; dg++) {
1,907,360✔
2127
        source_regions_.precursors_final(sr, dg) *= source_normalization_factor;
1,688,960✔
2128
      }
2129
    }
2130
  }
2131
}
1,705✔
2132

2133
void FlatSourceDomain::propagate_final_quantities()
936✔
2134
{
2135
#pragma omp parallel for
432✔
2136
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
187,704✔
2137
    for (int g = 0; g < negroups_; g++) {
1,797,120✔
2138
      source_regions_.scalar_flux_old(sr, g) =
1,609,920✔
2139
        source_regions_.scalar_flux_final(sr, g);
1,609,920✔
2140
    }
2141
    if (settings::create_delayed_neutrons) {
187,200!
2142
      for (int dg = 0; dg < ndgroups_; dg++) {
1,634,880✔
2143
        source_regions_.precursors_old(sr, dg) =
1,447,680✔
2144
          source_regions_.precursors_final(sr, dg);
1,447,680✔
2145
      }
2146
    }
2147
  }
2148
}
936✔
2149

2150
void FlatSourceDomain::store_time_step_quantities(bool increment_not_initialize)
936✔
2151
{
2152
#pragma omp parallel for
432✔
2153
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
187,704✔
2154
    for (int g = 0; g < negroups_; g++) {
1,797,120✔
2155
      int j = 0;
1,609,920✔
2156
      if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION)
1,609,920✔
2157
        j = 1;
237,120✔
2158
      add_value_to_bd_vector(source_regions_.scalar_flux_bd(sr, g),
1,609,920✔
2159
        source_regions_.scalar_flux_final(sr, g), increment_not_initialize,
2160
        RandomRay::bd_order_ + j);
2161
      if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION) {
1,609,920✔
2162
        // Multiply out sigma_t to store the base source
2163
        int material = source_regions_.material(sr);
237,120✔
2164
        double density_mult = source_regions_.density_mult(sr);
237,120✔
2165
        // TODO: add support for explicit void regions
2166
        double sigma_t = sigma_t_[material * negroups_ + g] * density_mult;
237,120✔
2167
        float source = source_regions_.source_final(sr, g) * sigma_t;
237,120✔
2168
        add_value_to_bd_vector(source_regions_.source_bd(sr, g), source,
237,120✔
2169
          increment_not_initialize, RandomRay::bd_order_);
2170
      }
2171
    }
2172
    if (settings::create_delayed_neutrons) {
187,200!
2173
      for (int dg = 0; dg < ndgroups_; dg++) {
1,634,880✔
2174
        add_value_to_bd_vector(source_regions_.precursors_bd(sr, dg),
1,447,680✔
2175
          source_regions_.precursors_final(sr, dg), increment_not_initialize,
2176
          RandomRay::bd_order_);
2177
      }
2178
    }
2179
  }
2180
}
936✔
2181

2182
void FlatSourceDomain::compute_rhs_bd_quantities()
780✔
2183
{
2184
#pragma omp parallel for
360✔
2185
  for (int64_t sr = 0; sr < n_source_regions(); sr++) {
156,420✔
2186
    for (int g = 0; g < negroups_; g++) {
1,497,600✔
2187
      source_regions_.scalar_flux_rhs_bd(sr, g) =
2,683,200✔
2188
        rhs_backwards_difference(source_regions_.scalar_flux_bd(sr, g),
1,341,600✔
2189
          RandomRay::bd_order_, settings::dt);
2190

2191
      if (RandomRay::time_method_ == RandomRayTimeMethod::PROPAGATION) {
1,341,600✔
2192
        source_regions_.source_rhs_bd(sr, g) = rhs_backwards_difference(
197,600✔
2193
          source_regions_.source_bd(sr, g), RandomRay::bd_order_, settings::dt);
2194

2195
        source_regions_.scalar_flux_rhs_bd_2(sr, g) =
197,600✔
2196
          rhs_backwards_difference(source_regions_.scalar_flux_bd(sr, g),
197,600✔
2197
            RandomRay::bd_order_, settings::dt, 2);
2198
      }
2199
    }
2200
    if (settings::create_delayed_neutrons) {
156,000!
2201
      for (int dg = 0; dg < ndgroups_; dg++) {
1,362,400✔
2202
        source_regions_.precursors_rhs_bd(sr, dg) =
1,206,400✔
2203
          rhs_backwards_difference(source_regions_.precursors_bd(sr, dg),
1,206,400✔
2204
            RandomRay::bd_order_, settings::dt);
2205
      }
2206
    }
2207
  }
2208
}
780✔
2209

2210
// Update material density and cross sections
2211
void FlatSourceDomain::update_material_density(int i)
780✔
2212
{
2213
#pragma omp parallel for
360✔
2214
  for (int j = 0; j < model::materials.size(); j++) {
1,610✔
2215
    auto& mat {model::materials[j]};
1,190✔
2216
    if (mat->density_timeseries_.size() != 0) {
1,190✔
2217
      double density_factor = mat->density_timeseries_[i] / mat->density_;
420✔
2218
      mat->density_ = mat->density_timeseries_[i];
420✔
2219
      for (int g_out = 0; g_out < negroups_; g_out++) {
6,720✔
2220
        for (int dg = 0; dg < ndgroups_; dg++) {
46,060✔
2221
          nu_d_sigma_f_[j * negroups_ * ndgroups_ + dg * negroups_ + g_out] *=
39,760✔
2222
            density_factor;
2223
        }
2224
        nu_p_sigma_f_[j * negroups_ + g_out] *= density_factor;
6,300✔
2225
        sigma_t_[j * negroups_ + g_out] *= density_factor;
6,300✔
2226
        nu_sigma_f_[j * negroups_ + g_out] *= density_factor;
6,300✔
2227
        sigma_f_[j * negroups_ + g_out] *= density_factor;
6,300✔
2228
        for (int g_in = 0; g_in < negroups_; g_in++) {
357,000✔
2229
          sigma_s_[j * negroups_ * negroups_ + g_out * negroups_ + g_in] *=
350,700✔
2230
            density_factor;
2231
        }
2232
      }
2233
    }
2234
  }
2235
}
780✔
2236

2237
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc