openmc-dev / openmc / build 12605079372

03 Jan 2025 11:05PM UTC coverage: 84.823% (-0.003%) from 84.826%

Pull Request #3129 (github / web-flow): Compute material volumes in mesh elements based on raytracing
Merge d4ae59bfd into 775c41512

196 of 222 new or added lines in 4 files covered (88.29%)
1044 existing lines in 27 files now uncovered
49961 of 58900 relevant lines covered (84.82%)
34048479.7 hits per line

Source File: /src/random_ray/flat_source_domain.cpp (80.72% covered)
#include "openmc/random_ray/flat_source_domain.h"

#include "openmc/cell.h"
#include "openmc/eigenvalue.h"
#include "openmc/geometry.h"
#include "openmc/material.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/output.h"
#include "openmc/plot.h"
#include "openmc/random_ray/random_ray.h"
#include "openmc/simulation.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/tally.h"
#include "openmc/tallies/tally_scoring.h"
#include "openmc/timer.h"

#include <cstdio>

namespace openmc {

//==============================================================================
// FlatSourceDomain implementation
//==============================================================================

// Static Variable Declarations
RandomRayVolumeEstimator FlatSourceDomain::volume_estimator_ {
  RandomRayVolumeEstimator::HYBRID};
bool FlatSourceDomain::volume_normalized_flux_tallies_ {false};
bool FlatSourceDomain::adjoint_ {false};
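
// Indexing convention used throughout this file: each material-filled cell
// instance is a flat source region (FSR) with index sr, and each energy
// group g within it forms a source element with index
//
//   idx = sr * negroups_ + g
//
// Region-wise arrays (volume_, material_, position_, ...) are indexed by sr;
// element-wise arrays (scalar fluxes, source_, tally_task_, ...) by idx.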

FlatSourceDomain::FlatSourceDomain() : negroups_(data::mg.num_energy_groups_)
{
  // Count the number of source regions, compute the cell offset indices, and
  // store the material type. The offsets are needed because some cell types
  // may not have material fills and therefore do not produce FSRs, so we
  // cannot index into the global arrays directly.
  for (const auto& c : model::cells) {
    if (c->type_ != Fill::MATERIAL) {
      source_region_offsets_.push_back(-1);
    } else {
      source_region_offsets_.push_back(n_source_regions_);
      n_source_regions_ += c->n_instances_;
      n_source_elements_ += c->n_instances_ * negroups_;
    }
  }

  // Initialize cell-wise arrays
  lock_.resize(n_source_regions_);
  material_.resize(n_source_regions_);
  position_recorded_.assign(n_source_regions_, 0);
  position_.resize(n_source_regions_);
  volume_.assign(n_source_regions_, 0.0);
  volume_t_.assign(n_source_regions_, 0.0);
  volume_naive_.assign(n_source_regions_, 0.0);

  // Initialize element-wise arrays
  scalar_flux_new_.assign(n_source_elements_, 0.0);
  scalar_flux_final_.assign(n_source_elements_, 0.0);
  source_.resize(n_source_elements_);

  tally_task_.resize(n_source_elements_);
  volume_task_.resize(n_source_regions_);

  if (settings::run_mode == RunMode::EIGENVALUE) {
    // If in eigenvalue mode, set starting flux to guess of unity
    scalar_flux_old_.assign(n_source_elements_, 1.0);
  } else {
    // If in fixed source mode, set starting flux to guess of zero
    // and initialize external source arrays
    scalar_flux_old_.assign(n_source_elements_, 0.0);
    external_source_.assign(n_source_elements_, 0.0);
    external_source_present_.assign(n_source_regions_, false);
  }

  // Initialize material array
  int64_t source_region_id = 0;
  for (int i = 0; i < model::cells.size(); i++) {
    Cell& cell = *model::cells[i];
    if (cell.type_ == Fill::MATERIAL) {
      for (int j = 0; j < cell.n_instances_; j++) {
        material_[source_region_id++] = cell.material(j);
      }
    }
  }

  // Sanity check
  if (source_region_id != n_source_regions_) {
    fatal_error("Unexpected number of source regions");
  }

  // Initialize tally volumes
  if (volume_normalized_flux_tallies_) {
    tally_volumes_.resize(model::tallies.size());
    for (int i = 0; i < model::tallies.size(); i++) {
      // Get the shape of the 3D result tensor
      auto shape = model::tallies[i]->results().shape();

      // Create a new 2D tensor with the same size as the first
      // two dimensions of the 3D tensor
      tally_volumes_[i] =
        xt::xtensor<double, 2>::from_shape({shape[0], shape[1]});
    }
  }

  // Compute simulation domain volume based on ray source
  auto* is = dynamic_cast<IndependentSource*>(RandomRay::ray_source_.get());
  SpatialDistribution* space_dist = is->space();
  SpatialBox* sb = dynamic_cast<SpatialBox*>(space_dist);
  Position dims = sb->upper_right() - sb->lower_left();
  simulation_volume_ = dims.x * dims.y * dims.z;
}
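
// Note: the simulation volume computed above assumes the ray source is an
// IndependentSource with a SpatialBox spatial distribution; otherwise the
// dynamic_casts return null and the subsequent dereferences are invalid.
// This is presumably guaranteed by input validation upstream.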

void FlatSourceDomain::batch_reset()
{
  // Reset scalar fluxes, iteration volume tallies, and region hit flags to
  // zero
  parallel_fill<double>(scalar_flux_new_, 0.0);
  parallel_fill<double>(volume_, 0.0);
}

void FlatSourceDomain::accumulate_iteration_flux()
{
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    scalar_flux_final_[se] += scalar_flux_new_[se];
  }
}

// Compute new estimate of scattering + fission sources in each source region
// based on the flux estimate from the previous iteration.
void FlatSourceDomain::update_neutron_source(double k_eff)
{
  simulation::time_update_src.start();

  double inverse_k_eff = 1.0 / k_eff;

  // Add scattering source
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];

    for (int g_out = 0; g_out < negroups_; g_out++) {
      double sigma_t = sigma_t_[material * negroups_ + g_out];
      double scatter_source = 0.0;

      for (int g_in = 0; g_in < negroups_; g_in++) {
        double scalar_flux = scalar_flux_old_[sr * negroups_ + g_in];
        double sigma_s =
          sigma_s_[material * negroups_ * negroups_ + g_out * negroups_ + g_in];
        scatter_source += sigma_s * scalar_flux;
      }

      source_[sr * negroups_ + g_out] = scatter_source / sigma_t;
    }
  }

  // Add fission source
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];

    for (int g_out = 0; g_out < negroups_; g_out++) {
      double sigma_t = sigma_t_[material * negroups_ + g_out];
      double fission_source = 0.0;

      for (int g_in = 0; g_in < negroups_; g_in++) {
        double scalar_flux = scalar_flux_old_[sr * negroups_ + g_in];
        double nu_sigma_f = nu_sigma_f_[material * negroups_ + g_in];
        double chi = chi_[material * negroups_ + g_out];
        fission_source += nu_sigma_f * scalar_flux * chi;
      }
      source_[sr * negroups_ + g_out] +=
        fission_source * inverse_k_eff / sigma_t;
    }
  }

  // Add external source if in fixed source mode
  if (settings::run_mode == RunMode::FIXED_SOURCE) {
#pragma omp parallel for
    for (int64_t se = 0; se < n_source_elements_; se++) {
      source_[se] += external_source_[se];
    }
  }

  simulation::time_update_src.stop();
}
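
// Note: taken together, the loops above build the reduced source for each
// source region i and group g,
//
//   Q_{i,g} = (1 / Sigma_t,{i,g}) * [ sum_{g'} Sigma_s,{i,g'->g} phi_{i,g'}
//           + (chi_{i,g} / k_eff) * sum_{g'} nu-Sigma_f,{i,g'} phi_{i,g'} ]
//
// with the external source term (already divided by Sigma_t in
// convert_external_sources()) added on top in fixed source mode.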

// Normalizes flux and updates simulation-averaged volume estimate
void FlatSourceDomain::normalize_scalar_flux_and_volumes(
  double total_active_distance_per_iteration)
{
  double normalization_factor = 1.0 / total_active_distance_per_iteration;
  double volume_normalization_factor =
    1.0 / (total_active_distance_per_iteration * simulation::current_batch);

// Normalize scalar flux to total distance travelled by all rays this iteration
#pragma omp parallel for
  for (int64_t se = 0; se < scalar_flux_new_.size(); se++) {
    scalar_flux_new_[se] *= normalization_factor;
  }

// Accumulate cell-wise ray length tallies collected this iteration, then
// update the simulation-averaged cell-wise volume estimates
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    volume_t_[sr] += volume_[sr];
    volume_naive_[sr] = volume_[sr] * normalization_factor;
    volume_[sr] = volume_t_[sr] * volume_normalization_factor;
  }
}
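
// Note: with L = total_active_distance_per_iteration and B = the current
// batch count, the two volume estimates maintained above are
//
//   volume_naive_[sr] = ell_sr / L                    (this iteration only)
//   volume_[sr]       = (sum over batches of ell_sr) / (L * B)
//
// where ell_sr is the ray track length accumulated in region sr. Both are
// unitless fractions of the overall simulation volume.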

void FlatSourceDomain::set_flux_to_flux_plus_source(
  int64_t idx, double volume, int material, int g)
{
  double sigma_t = sigma_t_[material * negroups_ + g];
  scalar_flux_new_[idx] /= (sigma_t * volume);
  scalar_flux_new_[idx] += source_[idx];
}
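
// Note: on entry, scalar_flux_new_[idx] holds the angular flux contributions
// accumulated by rays during the transport sweep, so the update above forms
// the usual flat source flux estimate
//
//   phi_{i,g} = Q_{i,g} + (sweep contributions) / (Sigma_t,{i,g} * V_i)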

void FlatSourceDomain::set_flux_to_old_flux(int64_t idx)
{
  scalar_flux_new_[idx] = scalar_flux_old_[idx];
}

void FlatSourceDomain::set_flux_to_source(int64_t idx)
{
  scalar_flux_new_[idx] = source_[idx];
}

// Combine transport flux contributions and flat source contributions from the
// previous iteration to generate this iteration's estimate of scalar flux.
int64_t FlatSourceDomain::add_source_to_scalar_flux()
{
  int64_t n_hits = 0;

#pragma omp parallel for reduction(+ : n_hits)
  for (int sr = 0; sr < n_source_regions_; sr++) {

    double volume_simulation_avg = volume_[sr];
    double volume_iteration = volume_naive_[sr];

    // Increment the number of hits if cell was hit this iteration
    if (volume_iteration) {
      n_hits++;
    }

    // Check if an external source is present in this source region
    bool external_source_present =
      external_source_present_.size() && external_source_present_[sr];

    // The volume treatment depends on the volume estimator type
    // and whether or not an external source is present in the cell.
    double volume;
    switch (volume_estimator_) {
    case RandomRayVolumeEstimator::NAIVE:
      volume = volume_iteration;
      break;
    case RandomRayVolumeEstimator::SIMULATION_AVERAGED:
      volume = volume_simulation_avg;
      break;
    case RandomRayVolumeEstimator::HYBRID:
      if (external_source_present) {
        volume = volume_iteration;
      } else {
        volume = volume_simulation_avg;
      }
      break;
    default:
      fatal_error("Invalid volume estimator type");
    }

    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      int64_t idx = (sr * negroups_) + g;

      // There are three scenarios we need to consider:
      if (volume_iteration > 0.0) {
        // 1. If the FSR was hit this iteration, then the new flux is equal to
        // the flat source from the previous iteration plus the contributions
        // from rays passing through the source region (computed during the
        // transport sweep)
        set_flux_to_flux_plus_source(idx, volume, material, g);
      } else if (volume_simulation_avg > 0.0) {
        // 2. If the FSR was not hit this iteration, but has been hit in some
        // previous iteration, then we need to make a choice about what to do.
        // Naively, we will usually want to set the flux equal to the reduced
        // source. However, in fixed source problems where a strong external
        // source is present in a cell with a very low cross section, this
        // approximation causes a large upward bias in the cell's flux
        // estimate (under these conditions, it can be orders of magnitude too
        // large). Thus, to avoid this bias, if any external source is present
        // in the cell we use the previous iteration's flux estimate instead.
        // This injects a small degree of correlation into the simulation, but
        // the effect is trivial when the miss rate is a few percent or less.
        if (external_source_present) {
          set_flux_to_old_flux(idx);
        } else {
          set_flux_to_source(idx);
        }
      }
      // 3. If the FSR was not hit this iteration, and has never been hit in
      // any iteration (i.e., its volume is zero), then we want to set the
      // flux to 0 to avoid dividing anything by a zero volume. This happens
      // implicitly given that the new scalar flux arrays are set to zero each
      // iteration.
    }
  }

  // Return the number of source regions that were hit this iteration
  return n_hits;
}

// Generates a new estimate of k_eff from the ratio of this iteration's
// fission production rate to the previous iteration's.
double FlatSourceDomain::compute_k_eff(double k_eff_old) const
{
  double fission_rate_old = 0;
  double fission_rate_new = 0;

  // Vector for gathering fission source terms for Shannon entropy calculation
  vector<float> p(n_source_regions_, 0.0f);

#pragma omp parallel for reduction(+ : fission_rate_old, fission_rate_new)
  for (int sr = 0; sr < n_source_regions_; sr++) {

    // If simulation averaged volume is zero, don't include this cell
    double volume = volume_[sr];
    if (volume == 0.0) {
      continue;
    }

    int material = material_[sr];

    double sr_fission_source_old = 0;
    double sr_fission_source_new = 0;

    for (int g = 0; g < negroups_; g++) {
      int64_t idx = (sr * negroups_) + g;
      double nu_sigma_f = nu_sigma_f_[material * negroups_ + g];
      sr_fission_source_old += nu_sigma_f * scalar_flux_old_[idx];
      sr_fission_source_new += nu_sigma_f * scalar_flux_new_[idx];
    }

    // Compute total fission rates in FSR
    sr_fission_source_old *= volume;
    sr_fission_source_new *= volume;

    // Accumulate totals
    fission_rate_old += sr_fission_source_old;
    fission_rate_new += sr_fission_source_new;

    // Store total fission rate in the FSR for the Shannon entropy calculation
    p[sr] = sr_fission_source_new;
  }

  double k_eff_new = k_eff_old * (fission_rate_new / fission_rate_old);

  double H = 0.0;
  // Precompute the inverse sum for better performance
  double inverse_sum = 1 / fission_rate_new;

#pragma omp parallel for reduction(+ : H)
  for (int sr = 0; sr < n_source_regions_; sr++) {
    // Only include FSRs with a positive fission source
    if (p[sr] > 0.0f) {
      // Normalize to the total fission rate
      float p_i = p[sr] * inverse_sum;
      // Sum values to obtain the Shannon entropy
      H -= p_i * std::log2(p_i);
    }
  }

  // Add the entropy value to the shared entropy vector in the openmc
  // namespace.
  simulation::entropy.push_back(H);

  return k_eff_new;
}
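
// Note: the two quantities produced above are
//
//   k_new = k_old * F_new / F_old,
//   F = sum_i V_i sum_g nu-Sigma_f,{i,g} phi_{i,g}
//
// and the Shannon entropy of the normalized fission source,
//
//   H = -sum_i p_i * log2(p_i),   p_i = F_i / F_new
//
// which serves as a convergence diagnostic for the fission source
// distribution.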

// This function is responsible for generating a mapping between random
// ray flat source regions (cell instances) and tally bins. The mapping
// takes the form of a "TallyTask" object, which accounts for one single
// score being applied to a single tally. Thus, a single source region
// may have anywhere from zero to many tally tasks associated with it ---
// meaning that the global "tally_task" data structure is 2D. The outer
// dimension corresponds to the source element (i.e., each entry corresponds
// to a specific energy group within a specific source region), and the
// inner dimension corresponds to the tallying task itself. Mechanically,
// the mapping between FSRs and spatial filters is done by considering
// the location of a single known ray midpoint that passed through the
// FSR. I.e., during transport, the first ray to pass through a given FSR
// will write down its midpoint for use with this function. This is a cheap
// and easy way of mapping FSRs to spatial tally filters, but comes with
// the downside of adding the restriction that spatial tally filters must
// share boundaries with the physical geometry of the simulation (so as
// not to subdivide any FSR). It is acceptable for a spatial tally region
// to contain multiple FSRs, but not the other way around.

// TODO: In future work, it would be preferable to offer a more general
// (but perhaps slightly more expensive) option for handling arbitrary
// spatial tallies that would be allowed to subdivide FSRs.

// Besides generating the mapping structure, this function also keeps track
// of whether or not all flat source regions have been hit yet. This is
// required, as there is no guarantee that all flat source regions will
// be hit every iteration, such that in the first few iterations some FSRs
// may not yet have a known position within them to facilitate mapping to
// spatial tally filters. However, after several iterations, once all FSRs
// have been hit and have had a tally map generated, this status is passed
// back to the caller to alert them that this function no longer needs to
// be called for the remainder of the simulation.

void FlatSourceDomain::convert_source_regions_to_tallies()
{
  openmc::simulation::time_tallies.start();

  // Tracks if we've generated a mapping yet for all source regions.
  bool all_source_regions_mapped = true;

// Attempt to generate mapping for all source regions
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {

    // If this source region has not been hit by a ray yet, then
    // we aren't going to be able to map it, so skip it.
    if (!position_recorded_[sr]) {
      all_source_regions_mapped = false;
      continue;
    }

    // A particle located at the recorded midpoint of a ray
    // crossing through this source region is used to establish
    // the spatial location of the source region
    Particle p;
    p.r() = position_[sr];
    p.r_last() = position_[sr];
    bool found = exhaustive_find_cell(p);

    // Loop over energy groups (so as to support energy filters)
    for (int g = 0; g < negroups_; g++) {

      // Set particle to the current energy
      p.g() = g;
      p.g_last() = g;
      p.E() = data::mg.energy_bin_avg_[p.g()];
      p.E_last() = p.E();

      int64_t source_element = sr * negroups_ + g;

      // If this task has already been populated, we don't need to do
      // it again.
      if (tally_task_[source_element].size() > 0) {
        continue;
      }

      // Loop over all active tallies. This logic is essentially identical
      // to what happens when scanning for applicable tallies during
      // MC transport.
      for (auto i_tally : model::active_tallies) {
        Tally& tally {*model::tallies[i_tally]};

        // Initialize an iterator over valid filter bin combinations.
        // If there are no valid combinations, use a continue statement
        // to ensure we skip the assume_separate break below.
        auto filter_iter = FilterBinIter(tally, p);
        auto end = FilterBinIter(tally, true, &p.filter_matches());
        if (filter_iter == end)
          continue;

        // Loop over filter bins.
        for (; filter_iter != end; ++filter_iter) {
          auto filter_index = filter_iter.index_;
          auto filter_weight = filter_iter.weight_;

          // Loop over scores
          for (auto score_index = 0; score_index < tally.scores_.size();
               score_index++) {
            auto score_bin = tally.scores_[score_index];
            // If a valid tally, filter, and score combination has been found,
            // then add it to the list of tally tasks for this source element.
            TallyTask task(i_tally, filter_index, score_index, score_bin);
            tally_task_[source_element].push_back(task);

            // Also add this task to the list of volume tasks for this source
            // region.
            volume_task_[sr].insert(task);
          }
        }
      }
      // Reset all the filter matches for the next tally event.
      for (auto& match : p.filter_matches())
        match.bins_present_ = false;
    }
  }
  openmc::simulation::time_tallies.stop();

  mapped_all_tallies_ = all_source_regions_mapped;
}

// Set the volume accumulators to zero for all tallies
void FlatSourceDomain::reset_tally_volumes()
{
  if (volume_normalized_flux_tallies_) {
#pragma omp parallel for
    for (int i = 0; i < tally_volumes_.size(); i++) {
      auto& tensor = tally_volumes_[i];
      tensor.fill(0.0); // Set all elements of the tensor to 0.0
    }
  }
}

// In fixed source mode, due to the way that volumetric fixed sources are
// converted and applied as volumetric sources in one or more source regions,
// we need to perform an additional normalization step to ensure that the
// reported scalar fluxes are in units per source neutron. This allows for
// direct comparison of reported tallies to Monte Carlo flux results.
// This factor needs to be computed at each iteration, as it is based on the
// volume estimate of each FSR, which improves over the course of the
// simulation.
double FlatSourceDomain::compute_fixed_source_normalization_factor() const
{
  // If we are not in fixed source mode, then there are no external sources
  // so no normalization is needed.
  if (settings::run_mode != RunMode::FIXED_SOURCE || adjoint_) {
    return 1.0;
  }

  // Step 1 is to sum over all source regions and energy groups to get the
  // total external source strength in the simulation.
  double simulation_external_source_strength = 0.0;
#pragma omp parallel for reduction(+ : simulation_external_source_strength)
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];
    double volume = volume_[sr] * simulation_volume_;
    for (int g = 0; g < negroups_; g++) {
      double sigma_t = sigma_t_[material * negroups_ + g];
      simulation_external_source_strength +=
        external_source_[sr * negroups_ + g] * sigma_t * volume;
    }
  }

  // Step 2 is to determine the total user-specified external source strength
  double user_external_source_strength = 0.0;
  for (auto& ext_source : model::external_sources) {
    user_external_source_strength += ext_source->strength();
  }

  // The correction factor is the ratio of the user-specified external source
  // strength to the simulation external source strength.
  double source_normalization_factor =
    user_external_source_strength / simulation_external_source_strength;

  return source_normalization_factor;
}
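
// Note: because external_source_ was pre-divided by Sigma_t in
// convert_external_sources(), the Sigma_t factor in Step 1 undoes that
// scaling, so the sum is the source strength actually represented in the
// simulation. The resulting factor is
//
//   f = S_user / S_sim,   S_sim = sum_i V_i sum_g Sigma_t,{i,g} q_{i,g}
//
// and reported fluxes are multiplied by f to put them in units per source
// neutron.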

// Tallying in random ray is not done directly during transport; rather,
// it is done only once after each power iteration. This is made possible
// by way of a mapping data structure that relates spatial source regions
// (FSRs) to tally/filter/score combinations. The mechanism by which the
// mapping is done (and the limitations incurred) is documented in the
// "convert_source_regions_to_tallies()" function comments above. The present
// tally function simply traverses the mapping data structure and executes
// the scoring operations to OpenMC's native tally result arrays.

void FlatSourceDomain::random_ray_tally()
{
  openmc::simulation::time_tallies.start();

  // Reset our tally volumes to zero
  reset_tally_volumes();

  double source_normalization_factor =
    compute_fixed_source_normalization_factor();

// We loop over all source regions and energy groups. For each
// element, we check if there are any scores needed and apply
// them.
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    // The fsr.volume_ is the unitless fractional simulation averaged volume
    // (i.e., it is the FSR's fraction of the overall simulation volume). The
    // simulation_volume_ is the total 3D physical volume in cm^3 of the
    // entire global simulation domain (as defined by the ray source box).
    // Thus, the FSR's true 3D spatial volume in cm^3 is found by multiplying
    // its fraction of the total volume by the total volume. Not important in
    // eigenvalue solves, but useful in fixed source solves for returning the
    // flux shape with a magnitude that makes sense relative to the fixed
    // source strength.
    double volume = volume_[sr] * simulation_volume_;

    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      int64_t idx = sr * negroups_ + g;
      double flux = scalar_flux_new_[idx] * source_normalization_factor;

      // Determine numerical score value
      for (auto& task : tally_task_[idx]) {
        double score;
        switch (task.score_type) {

        case SCORE_FLUX:
          score = flux * volume;
          break;

        case SCORE_TOTAL:
          score = flux * volume * sigma_t_[material * negroups_ + g];
          break;

        case SCORE_FISSION:
          score = flux * volume * sigma_f_[material * negroups_ + g];
          break;

        case SCORE_NU_FISSION:
          score = flux * volume * nu_sigma_f_[material * negroups_ + g];
          break;

        case SCORE_EVENTS:
          score = 1.0;
          break;

        default:
          fatal_error("Invalid score specified in tallies.xml. Only flux, "
                      "total, fission, nu-fission, and events are supported in "
                      "random ray mode.");
          break;
        }

        // Apply score to the appropriate tally bin
        Tally& tally {*model::tallies[task.tally_idx]};
#pragma omp atomic
        tally.results_(task.filter_idx, task.score_idx, TallyResult::VALUE) +=
          score;
      }
    }

    // For flux tallies, the total volume of the spatial region is needed
    // for normalizing the flux. We store this volume in a separate tensor.
    // We only contribute to each volume tally bin once per FSR.
    if (volume_normalized_flux_tallies_) {
      for (const auto& task : volume_task_[sr]) {
        if (task.score_type == SCORE_FLUX) {
#pragma omp atomic
          tally_volumes_[task.tally_idx](task.filter_idx, task.score_idx) +=
            volume;
        }
      }
    }
  } // end FSR loop

  // Normalize any flux scores by the total volume of the FSRs scoring to that
  // bin. To do this, we loop over all tallies, and then all filter bins,
  // and then scores. For each score, we check the tally data structure to
  // see what index that score corresponds to. If that score is a flux score,
  // then we divide it by volume.
  if (volume_normalized_flux_tallies_) {
    for (int i = 0; i < model::tallies.size(); i++) {
      Tally& tally {*model::tallies[i]};
#pragma omp parallel for
      for (int bin = 0; bin < tally.n_filter_bins(); bin++) {
        for (int score_idx = 0; score_idx < tally.n_scores(); score_idx++) {
          auto score_type = tally.scores_[score_idx];
          if (score_type == SCORE_FLUX) {
            double vol = tally_volumes_[i](bin, score_idx);
            if (vol > 0.0) {
              tally.results_(bin, score_idx, TallyResult::VALUE) /= vol;
            }
          }
        }
      }
    }
  }

  openmc::simulation::time_tallies.stop();
}
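
// Note: the scores applied above follow standard track-length conventions,
// with the volume-weighted flux acting as the estimator:
//
//   flux score      = phi_{i,g} * V_i
//   reaction scores = phi_{i,g} * V_i * Sigma_x,{i,g}   (x = total, fission,
//                                                        nu-fission)
//
// where V_i is the FSR volume in cm^3 and phi has already been scaled by the
// fixed source normalization factor where applicable.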

void FlatSourceDomain::all_reduce_replicated_source_regions()
{
#ifdef OPENMC_MPI

  // If we only have 1 MPI rank, no need
  // to reduce anything.
  if (mpi::n_procs <= 1)
    return;

  simulation::time_bank_sendrecv.start();

  // The "position_recorded" variable needs to be allreduced (and maxed),
  // as whether or not a cell was hit will affect some decisions in how the
  // source is calculated in the next iteration so as to avoid dividing
  // by zero. We take the max rather than the sum as the hit values are
  // expected to be zero or 1.
  MPI_Allreduce(MPI_IN_PLACE, position_recorded_.data(), n_source_regions_,
    MPI_INT, MPI_MAX, mpi::intracomm);

  // The position variable is more complicated to reduce than the others,
  // as we do not want the sum of all positions in each cell; rather, we
  // want to just pick any single valid position. Thus, we perform a gather
  // and then pick the first valid position we find for all source regions
  // that have had a position recorded. This operation does not need to
  // be broadcast back to other ranks, as this value is only used for the
  // tally conversion operation, which is only performed on the master rank.
  // While this is expensive, it only needs to be done for active batches,
  // and only if we have not mapped all the tallies yet. Once tallies are
  // fully mapped, then the position vector is fully populated, so this
  // operation can be skipped.

  // First, we broadcast the fully mapped tally status variable so that
  // all ranks are on the same page
  int mapped_all_tallies_i = static_cast<int>(mapped_all_tallies_);
  MPI_Bcast(&mapped_all_tallies_i, 1, MPI_INT, 0, mpi::intracomm);

  // Then, we perform the gather of position data, if needed
  if (simulation::current_batch > settings::n_inactive &&
      !mapped_all_tallies_i) {

    // Master rank will gather results and pick valid positions
    if (mpi::master) {
      // Initialize temporary vector for receiving positions
      vector<vector<Position>> all_position;
      all_position.resize(mpi::n_procs);
      for (int i = 0; i < mpi::n_procs; i++) {
        all_position[i].resize(n_source_regions_);
      }

      // Copy master rank data into gathered vector for convenience
      all_position[0] = position_;

      // Receive all data into gather vector
      for (int i = 1; i < mpi::n_procs; i++) {
        MPI_Recv(all_position[i].data(), n_source_regions_ * 3, MPI_DOUBLE, i,
          0, mpi::intracomm, MPI_STATUS_IGNORE);
      }

      // Scan through gathered data and pick first valid cell position
      for (int sr = 0; sr < n_source_regions_; sr++) {
        if (position_recorded_[sr] == 1) {
          for (int i = 0; i < mpi::n_procs; i++) {
            if (all_position[i][sr].x != 0.0 || all_position[i][sr].y != 0.0 ||
                all_position[i][sr].z != 0.0) {
              position_[sr] = all_position[i][sr];
              break;
            }
          }
        }
      }
    } else {
      // Other ranks just send in their data
      MPI_Send(position_.data(), n_source_regions_ * 3, MPI_DOUBLE, 0, 0,
        mpi::intracomm);
    }
  }

  // For the rest of the source region data, we simply perform an all reduce,
  // as these values will be needed on all ranks for transport during the
  // next iteration.
  MPI_Allreduce(MPI_IN_PLACE, volume_.data(), n_source_regions_, MPI_DOUBLE,
    MPI_SUM, mpi::intracomm);

  MPI_Allreduce(MPI_IN_PLACE, scalar_flux_new_.data(), n_source_elements_,
    MPI_DOUBLE, MPI_SUM, mpi::intracomm);

  simulation::time_bank_sendrecv.stop();
#endif
}

double FlatSourceDomain::evaluate_flux_at_point(
  Position r, int64_t sr, int g) const
{
  // For a flat source, the flux is constant within the FSR, so the position
  // argument is unused; the accumulated flux is simply averaged over the
  // active batches.
  return scalar_flux_final_[sr * negroups_ + g] /
         (settings::n_batches - settings::n_inactive);
}

// Outputs all basic material, FSR ID, multigroup flux, and
// fission source data to a .vtk file that can be directly
// loaded and displayed by ParaView. Note that .vtk binary
// files require big endian byte ordering, so endianness
// is checked and flipped if necessary.
void FlatSourceDomain::output_to_vtk() const
{
  // Rename .h5 plot filename(s) to .vtk filenames
  for (int p = 0; p < model::plots.size(); p++) {
    PlottableInterface* plot = model::plots[p].get();
    plot->path_plot() =
      plot->path_plot().substr(0, plot->path_plot().find_last_of('.')) + ".vtk";
  }

  // Print header information
  print_plot();

  // Outer loop over plots
  for (int p = 0; p < model::plots.size(); p++) {

    // Get handle to OpenMC plot object and extract params
    Plot* openmc_plot = dynamic_cast<Plot*>(model::plots[p].get());

    // Random ray plots only support voxel plots
    if (!openmc_plot) {
      warning(fmt::format("Plot {} is invalid plot type -- only voxel plotting "
                          "is allowed in random ray mode.",
        p));
      continue;
    } else if (openmc_plot->type_ != Plot::PlotType::voxel) {
      warning(fmt::format("Plot {} is invalid plot type -- only voxel plotting "
                          "is allowed in random ray mode.",
        p));
      continue;
    }

    int Nx = openmc_plot->pixels_[0];
    int Ny = openmc_plot->pixels_[1];
    int Nz = openmc_plot->pixels_[2];
    Position origin = openmc_plot->origin_;
    Position width = openmc_plot->width_;
    Position ll = origin - width / 2.0;
    double x_delta = width.x / Nx;
    double y_delta = width.y / Ny;
    double z_delta = width.z / Nz;
    std::string filename = openmc_plot->path_plot();

    // Perform sanity checks on file size. The larger threshold must be
    // checked first, as both conditions hold for very large plots.
    uint64_t bytes = Nx * Ny * Nz * (negroups_ + 1 + 1 + 1) * sizeof(float);
    write_message(5, "Processing plot {}: {}... (Estimated size is {} MB)",
      openmc_plot->id(), filename, bytes / 1.0e6);
    if (bytes / 1.0e9 > 100.0) {
      fatal_error("Voxel plot specification is too large (>100 GB). Exiting.");
    } else if (bytes / 1.0e9 > 1.0) {
      warning("Voxel plot specification is very large (>1 GB). Plotting may be "
              "slow.");
    }

    // Relate voxel spatial locations to random ray source regions
    vector<int> voxel_indices(Nx * Ny * Nz);
    vector<Position> voxel_positions(Nx * Ny * Nz);

#pragma omp parallel for collapse(3)
    for (int z = 0; z < Nz; z++) {
      for (int y = 0; y < Ny; y++) {
        for (int x = 0; x < Nx; x++) {
          Position sample;
          sample.z = ll.z + z_delta / 2.0 + z * z_delta;
          sample.y = ll.y + y_delta / 2.0 + y * y_delta;
          sample.x = ll.x + x_delta / 2.0 + x * x_delta;
          Particle p;
          p.r() = sample;
          bool found = exhaustive_find_cell(p);
          int i_cell = p.lowest_coord().cell;
          int64_t source_region_idx =
            source_region_offsets_[i_cell] + p.cell_instance();
          voxel_indices[z * Ny * Nx + y * Nx + x] = source_region_idx;
          voxel_positions[z * Ny * Nx + y * Nx + x] = sample;
        }
      }
    }

    double source_normalization_factor =
      compute_fixed_source_normalization_factor();

    // Open file for writing
    std::FILE* plot = std::fopen(filename.c_str(), "wb");

    // Write vtk metadata
    std::fprintf(plot, "# vtk DataFile Version 2.0\n");
    std::fprintf(plot, "Dataset File\n");
    std::fprintf(plot, "BINARY\n");
    std::fprintf(plot, "DATASET STRUCTURED_POINTS\n");
    std::fprintf(plot, "DIMENSIONS %d %d %d\n", Nx, Ny, Nz);
    std::fprintf(plot, "ORIGIN 0 0 0\n");
    std::fprintf(plot, "SPACING %lf %lf %lf\n", x_delta, y_delta, z_delta);
    std::fprintf(plot, "POINT_DATA %d\n", Nx * Ny * Nz);

    // Plot multigroup flux data
    for (int g = 0; g < negroups_; g++) {
      std::fprintf(plot, "SCALARS flux_group_%d float\n", g);
      std::fprintf(plot, "LOOKUP_TABLE default\n");
      for (int i = 0; i < Nx * Ny * Nz; i++) {
        int64_t fsr = voxel_indices[i];
        float flux = evaluate_flux_at_point(voxel_positions[i], fsr, g);
        flux = convert_to_big_endian<float>(flux);
        std::fwrite(&flux, sizeof(float), 1, plot);
      }
    }

    // Plot FSRs
    std::fprintf(plot, "SCALARS FSRs float\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int fsr : voxel_indices) {
      float value = future_prn(10, fsr);
      value = convert_to_big_endian<float>(value);
      std::fwrite(&value, sizeof(float), 1, plot);
    }

    // Plot Materials
    std::fprintf(plot, "SCALARS Materials int\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int fsr : voxel_indices) {
      int mat = material_[fsr];
      mat = convert_to_big_endian<int>(mat);
      std::fwrite(&mat, sizeof(int), 1, plot);
    }

    // Plot fission source
    std::fprintf(plot, "SCALARS total_fission_source float\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int i = 0; i < Nx * Ny * Nz; i++) {
      int64_t fsr = voxel_indices[i];

      float total_fission = 0.0;
      int mat = material_[fsr];
      for (int g = 0; g < negroups_; g++) {
        float flux = evaluate_flux_at_point(voxel_positions[i], fsr, g);
        double sigma_f = sigma_f_[mat * negroups_ + g];
        total_fission += sigma_f * flux;
      }
      total_fission = convert_to_big_endian<float>(total_fission);
      std::fwrite(&total_fission, sizeof(float), 1, plot);
    }

    std::fclose(plot);
  }
}

void FlatSourceDomain::apply_external_source_to_source_region(
  Discrete* discrete, double strength_factor, int64_t source_region)
{
  external_source_present_[source_region] = true;

  const auto& discrete_energies = discrete->x();
  const auto& discrete_probs = discrete->prob();

  for (int i = 0; i < discrete_energies.size(); i++) {
    int g = data::mg.get_group_index(discrete_energies[i]);
    external_source_[source_region * negroups_ + g] +=
      discrete_probs[i] * strength_factor;
  }
}

void FlatSourceDomain::apply_external_source_to_cell_instances(int32_t i_cell,
  Discrete* discrete, double strength_factor, int target_material_id,
  const vector<int32_t>& instances)
{
  Cell& cell = *model::cells[i_cell];

  if (cell.type_ != Fill::MATERIAL)
    return;

  for (int j : instances) {
    int cell_material_idx = cell.material(j);
    int cell_material_id = model::materials[cell_material_idx]->id();
    if (target_material_id == C_NONE ||
        cell_material_id == target_material_id) {
      int64_t source_region = source_region_offsets_[i_cell] + j;
      apply_external_source_to_source_region(
        discrete, strength_factor, source_region);
    }
  }
}

void FlatSourceDomain::apply_external_source_to_cell_and_children(
  int32_t i_cell, Discrete* discrete, double strength_factor,
  int32_t target_material_id)
{
  Cell& cell = *model::cells[i_cell];

  if (cell.type_ == Fill::MATERIAL) {
    vector<int> instances(cell.n_instances_);
    std::iota(instances.begin(), instances.end(), 0);
    apply_external_source_to_cell_instances(
      i_cell, discrete, strength_factor, target_material_id, instances);
  } else if (target_material_id == C_NONE) {
    std::unordered_map<int32_t, vector<int32_t>> cell_instance_list =
      cell.get_contained_cells(0, nullptr);
    for (const auto& pair : cell_instance_list) {
      int32_t i_child_cell = pair.first;
      apply_external_source_to_cell_instances(i_child_cell, discrete,
        strength_factor, target_material_id, pair.second);
    }
  }
}

void FlatSourceDomain::count_external_source_regions()
{
  n_external_source_regions_ = 0;
#pragma omp parallel for reduction(+ : n_external_source_regions_)
  for (int sr = 0; sr < n_source_regions_; sr++) {
    if (external_source_present_[sr]) {
      n_external_source_regions_++;
    }
  }
}

void FlatSourceDomain::convert_external_sources()
{
  // Loop over external sources
  for (int es = 0; es < model::external_sources.size(); es++) {
    Source* s = model::external_sources[es].get();
    IndependentSource* is = dynamic_cast<IndependentSource*>(s);
    Discrete* energy = dynamic_cast<Discrete*>(is->energy());
    const std::unordered_set<int32_t>& domain_ids = is->domain_ids();

    double strength_factor = is->strength();

    if (is->domain_type() == Source::DomainType::MATERIAL) {
      for (int32_t material_id : domain_ids) {
        for (int i_cell = 0; i_cell < model::cells.size(); i_cell++) {
          apply_external_source_to_cell_and_children(
            i_cell, energy, strength_factor, material_id);
        }
      }
    } else if (is->domain_type() == Source::DomainType::CELL) {
      for (int32_t cell_id : domain_ids) {
        int32_t i_cell = model::cell_map[cell_id];
        apply_external_source_to_cell_and_children(
          i_cell, energy, strength_factor, C_NONE);
      }
    } else if (is->domain_type() == Source::DomainType::UNIVERSE) {
      for (int32_t universe_id : domain_ids) {
        int32_t i_universe = model::universe_map[universe_id];
        Universe& universe = *model::universes[i_universe];
        for (int32_t i_cell : universe.cells_) {
          apply_external_source_to_cell_and_children(
            i_cell, energy, strength_factor, C_NONE);
        }
      }
    }
  } // End loop over external sources

// Divide the fixed source term by sigma t (to save time when applying each
// iteration)
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      double sigma_t = sigma_t_[material * negroups_ + g];
      external_source_[sr * negroups_ + g] /= sigma_t;
    }
  }
}

void FlatSourceDomain::flux_swap()
{
  scalar_flux_old_.swap(scalar_flux_new_);
}

void FlatSourceDomain::flatten_xs()
{
  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single angle data.
  const int t = 0;
  const int a = 0;

  n_materials_ = data::mg.macro_xs_.size();
  for (auto& m : data::mg.macro_xs_) {
    for (int g_out = 0; g_out < negroups_; g_out++) {
      if (m.exists_in_model) {
        double sigma_t =
          m.get_xs(MgxsType::TOTAL, g_out, NULL, NULL, NULL, t, a);
        sigma_t_.push_back(sigma_t);

        double nu_Sigma_f =
          m.get_xs(MgxsType::NU_FISSION, g_out, NULL, NULL, NULL, t, a);
        nu_sigma_f_.push_back(nu_Sigma_f);

        double sigma_f =
          m.get_xs(MgxsType::FISSION, g_out, NULL, NULL, NULL, t, a);
        sigma_f_.push_back(sigma_f);

        double chi =
          m.get_xs(MgxsType::CHI_PROMPT, g_out, &g_out, NULL, NULL, t, a);
        chi_.push_back(chi);

        for (int g_in = 0; g_in < negroups_; g_in++) {
          double sigma_s =
            m.get_xs(MgxsType::NU_SCATTER, g_in, &g_out, NULL, NULL, t, a);
          sigma_s_.push_back(sigma_s);
        }
      } else {
        sigma_t_.push_back(0);
        nu_sigma_f_.push_back(0);
        sigma_f_.push_back(0);
        chi_.push_back(0);
        for (int g_in = 0; g_in < negroups_; g_in++) {
          sigma_s_.push_back(0);
        }
      }
    }
  }
}
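
// Note: the flattened cross section layouts built above are indexed as
//
//   sigma_t_[m * negroups_ + g]   (likewise nu_sigma_f_, sigma_f_, chi_)
//   sigma_s_[m * negroups_ * negroups_ + g_out * negroups_ + g_in]
//
// matching the access patterns in update_neutron_source() and the tally
// routines, so each lookup is a single index computation into a contiguous
// array.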

void FlatSourceDomain::set_adjoint_sources(const vector<double>& forward_flux)
{
  // Set the external source to 1/forward_flux.
  // The forward flux is given as a total over the forward simulation, so it
  // must be converted to a "per batch" quantity.
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    external_source_[se] = 1.0 / forward_flux[se];
  }

  // Divide the fixed source term by sigma t (to save time when applying each
  // iteration)
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      double sigma_t = sigma_t_[material * negroups_ + g];
      external_source_[sr * negroups_ + g] /= sigma_t;
    }
  }
}

void FlatSourceDomain::transpose_scattering_matrix()
{
  // Transpose the inner two dimensions for each material
  for (int m = 0; m < n_materials_; ++m) {
    int material_offset = m * negroups_ * negroups_;
    for (int i = 0; i < negroups_; ++i) {
      for (int j = i + 1; j < negroups_; ++j) {
        // Calculate indices of the elements to swap
        int idx1 = material_offset + i * negroups_ + j;
        int idx2 = material_offset + j * negroups_ + i;

        // Swap the elements to transpose the matrix
        std::swap(sigma_s_[idx1], sigma_s_[idx2]);
      }
    }
  }
}
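
// Note: in the forward solve, element [g_out][g_in] of the scattering matrix
// holds Sigma_s,{g_in->g_out}. Transposing it in place, as above, appears
// intended to let the adjoint solve (see adjoint_ and set_adjoint_sources())
// reuse the same update_neutron_source() kernel unchanged, since after the
// transpose the same access pattern yields the adjoint group-to-group
// coupling.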

} // namespace openmc