
openmc-dev / openmc · build 8745330834 (push via GitHub, web-flow)
18 Apr 2024 10:10PM UTC · coverage: 84.629% (-0.01%) from 84.643%
Random Ray Transport (#2823)

Co-authored-by: Gavin Ridley <gavin.keith.ridley@gmail.com>
Co-authored-by: Paul Romano <paul.k.romano@gmail.com>

577 of 700 new or added lines in 16 files covered. (82.43%)

3 existing lines in 2 files now uncovered.

48402 of 57193 relevant lines covered (84.63%)

34065627.92 hits per line


Source File: /src/random_ray/flat_source_domain.cpp (72.44% covered)

#include "openmc/random_ray/flat_source_domain.h"

#include "openmc/cell.h"
#include "openmc/geometry.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/output.h"
#include "openmc/plot.h"
#include "openmc/random_ray/random_ray.h"
#include "openmc/simulation.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/tally.h"
#include "openmc/tallies/tally_scoring.h"
#include "openmc/timer.h"

#include <cstdio>

namespace openmc {

//==============================================================================
// FlatSourceDomain implementation
//==============================================================================

FlatSourceDomain::FlatSourceDomain() : negroups_(data::mg.num_energy_groups_)
{
  // Count the number of source regions, compute the cell offset indices, and
  // store the material type. The reason for the offsets is that some cell
  // types may not have material fills, and therefore do not produce FSRs.
  // Thus, we cannot index into the global arrays directly.
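  // Illustrative example (hypothetical cell layout, not from the original
  // source): for cells {material (2 instances), universe fill,
  // material (3 instances)}, source_region_offsets_ becomes {0, -1, 2} and
  // n_source_regions_ = 5, so a material-filled cell's FSR index is
  // source_region_offsets_[cell] + instance.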
  for (const auto& c : model::cells) {
    if (c->type_ != Fill::MATERIAL) {
      source_region_offsets_.push_back(-1);
    } else {
      source_region_offsets_.push_back(n_source_regions_);
      n_source_regions_ += c->n_instances_;
      n_source_elements_ += c->n_instances_ * negroups_;
    }
  }

  // Initialize cell-wise arrays
  lock_.resize(n_source_regions_);
  material_.resize(n_source_regions_);
  position_recorded_.assign(n_source_regions_, 0);
  position_.resize(n_source_regions_);
  volume_.assign(n_source_regions_, 0.0);
  volume_t_.assign(n_source_regions_, 0.0);
  was_hit_.assign(n_source_regions_, 0);

  // Initialize element-wise arrays
  scalar_flux_new_.assign(n_source_elements_, 0.0);
  scalar_flux_old_.assign(n_source_elements_, 1.0);
  scalar_flux_final_.assign(n_source_elements_, 0.0);
  source_.resize(n_source_elements_);
  tally_task_.resize(n_source_elements_);

  // Initialize material array
  int64_t source_region_id = 0;
  for (int i = 0; i < model::cells.size(); i++) {
    Cell& cell = *model::cells[i];
    if (cell.type_ == Fill::MATERIAL) {
      for (int j = 0; j < cell.n_instances_; j++) {
        material_[source_region_id++] = cell.material(j);
      }
    }
  }

  // Sanity check
  if (source_region_id != n_source_regions_) {
    fatal_error("Unexpected number of source regions");
  }
}

void FlatSourceDomain::batch_reset()
{
  // Reset scalar fluxes, iteration volume tallies, and region hit flags to
  // zero
  parallel_fill<float>(scalar_flux_new_, 0.0f);
  parallel_fill<double>(volume_, 0.0);
  parallel_fill<int>(was_hit_, 0);
}

void FlatSourceDomain::accumulate_iteration_flux()
{
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    scalar_flux_final_[se] += scalar_flux_new_[se];
  }
}

// Compute new estimate of scattering + fission sources in each source region
// based on the flux estimate from the previous iteration.
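// As a sketch of the update performed below (notation ours, not from the
// original source): for source region i, outgoing group g, and incoming
// group g',
//
//   q_{i,g} = [ sum_{g'} Sigma_s(g'->g) * phi_{i,g'}
//             + (1/k_eff) * sum_{g'} chi(g'->g) * nu*Sigma_f(g') * phi_{i,g'} ]
//             / Sigma_t(g)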
void FlatSourceDomain::update_neutron_source(double k_eff)
{
  simulation::time_update_src.start();

  double inverse_k_eff = 1.0 / k_eff;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    int material = material_[sr];

    for (int e_out = 0; e_out < negroups_; e_out++) {
      float sigma_t = data::mg.macro_xs_[material].get_xs(
        MgxsType::TOTAL, e_out, nullptr, nullptr, nullptr, t, a);
      float scatter_source = 0.0f;
      float fission_source = 0.0f;

      for (int e_in = 0; e_in < negroups_; e_in++) {
        float scalar_flux = scalar_flux_old_[sr * negroups_ + e_in];
        float sigma_s = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_SCATTER, e_in, &e_out, nullptr, nullptr, t, a);
        float nu_sigma_f = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_FISSION, e_in, nullptr, nullptr, nullptr, t, a);
        float chi = data::mg.macro_xs_[material].get_xs(
          MgxsType::CHI_PROMPT, e_in, &e_out, nullptr, nullptr, t, a);
        scatter_source += sigma_s * scalar_flux;
        fission_source += nu_sigma_f * scalar_flux * chi;
      }

      fission_source *= inverse_k_eff;
      float new_isotropic_source = (scatter_source + fission_source) / sigma_t;
      source_[sr * negroups_ + e_out] = new_isotropic_source;
    }
  }

  simulation::time_update_src.stop();
}

// Normalizes flux and updates simulation-averaged volume estimate
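// Sketch of the normalization below (notation ours): with L the total active
// ray distance per iteration and N = simulation::current_batch,
//
//   phi_{i,g} <- phi_{i,g} / L
//   V_i       <- (cumulative track length tallied in region i) / (L * N)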
void FlatSourceDomain::normalize_scalar_flux_and_volumes(
  double total_active_distance_per_iteration)
{
  float normalization_factor = 1.0 / total_active_distance_per_iteration;
  double volume_normalization_factor =
    1.0 / (total_active_distance_per_iteration * simulation::current_batch);

// Normalize scalar flux to total distance travelled by all rays this iteration
#pragma omp parallel for
  for (int64_t e = 0; e < scalar_flux_new_.size(); e++) {
    scalar_flux_new_[e] *= normalization_factor;
  }

// Accumulate cell-wise ray length tallies collected this iteration, then
// update the simulation-averaged cell-wise volume estimates
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    volume_t_[sr] += volume_[sr];
    volume_[sr] = volume_t_[sr] * volume_normalization_factor;
  }
}

// Combine transport flux contributions and flat source contributions from the
// previous iteration to generate this iteration's estimate of scalar flux.
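// For a region hit this iteration, the reconstruction below amounts to
// (notation ours):
//
//   phi_{i,g} = q_{i,g} + (transport sweep sum for i,g) / (Sigma_t(g) * V_i)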
int64_t FlatSourceDomain::add_source_to_scalar_flux()
{
  int64_t n_hits = 0;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for reduction(+ : n_hits)
  for (int sr = 0; sr < n_source_regions_; sr++) {

    // Check if this cell was hit this iteration
    int was_cell_hit = was_hit_[sr];
    if (was_cell_hit) {
      n_hits++;
    }

    double volume = volume_[sr];
    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      int64_t idx = (sr * negroups_) + g;

      // There are three scenarios we need to consider:
      if (was_cell_hit) {
        // 1. If the FSR was hit this iteration, then the new flux is equal to
        // the flat source from the previous iteration plus the contributions
        // from rays passing through the source region (computed during the
        // transport sweep)
        float sigma_t = data::mg.macro_xs_[material].get_xs(
          MgxsType::TOTAL, g, nullptr, nullptr, nullptr, t, a);
        scalar_flux_new_[idx] /= (sigma_t * volume);
        scalar_flux_new_[idx] += source_[idx];
      } else if (volume > 0.0) {
        // 2. If the FSR was not hit this iteration, but has been hit in some
        // previous iteration, then we simply set the new scalar flux to be
        // equal to the contribution from the flat source alone.
        scalar_flux_new_[idx] = source_[idx];
      } else {
        // 3. If the FSR was not hit this iteration, and it has never been hit
        // in any iteration (i.e., volume is zero), then we want to set this to
        // 0 to avoid dividing anything by a zero volume.
        scalar_flux_new_[idx] = 0.0f;
      }
    }
  }

  // Return the number of source regions that were hit this iteration
  return n_hits;
}

// Generates new estimate of k_eff based on the differences between this
// iteration's estimate of the scalar flux and the last iteration's estimate.
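// Sketch of the estimator below (notation ours):
//
//   k_new = k_old * [ sum_i V_i * sum_g nu*Sigma_f(g) * phi_new_{i,g} ]
//                 / [ sum_i V_i * sum_g nu*Sigma_f(g) * phi_old_{i,g} ]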
double FlatSourceDomain::compute_k_eff(double k_eff_old) const
{
  double fission_rate_old = 0;
  double fission_rate_new = 0;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for reduction(+ : fission_rate_old, fission_rate_new)
  for (int sr = 0; sr < n_source_regions_; sr++) {

    // If simulation-averaged volume is zero, don't include this cell
    double volume = volume_[sr];
    if (volume == 0.0) {
      continue;
    }

    int material = material_[sr];

    double sr_fission_source_old = 0;
    double sr_fission_source_new = 0;

    for (int g = 0; g < negroups_; g++) {
      int64_t idx = (sr * negroups_) + g;
      double nu_sigma_f = data::mg.macro_xs_[material].get_xs(
        MgxsType::NU_FISSION, g, nullptr, nullptr, nullptr, t, a);
      sr_fission_source_old += nu_sigma_f * scalar_flux_old_[idx];
      sr_fission_source_new += nu_sigma_f * scalar_flux_new_[idx];
    }

    fission_rate_old += sr_fission_source_old * volume;
    fission_rate_new += sr_fission_source_new * volume;
  }

  double k_eff_new = k_eff_old * (fission_rate_new / fission_rate_old);

  return k_eff_new;
}

// This function is responsible for generating a mapping between random
// ray flat source regions (cell instances) and tally bins. The mapping
// takes the form of a "TallyTask" object, which accounts for one single
// score being applied to a single tally. Thus, a single source region
// may have anywhere from zero to many tally tasks associated with it,
// meaning that the global "tally_task" data structure is 2D. The outer
// dimension corresponds to the source element (i.e., each entry corresponds
// to a specific energy group within a specific source region), and the
// inner dimension corresponds to the tallying task itself. Mechanically,
// the mapping between FSRs and spatial filters is done by considering
// the location of a single known ray midpoint that passed through the
// FSR. I.e., during transport, the first ray to pass through a given FSR
// will write down its midpoint for use with this function. This is a cheap
// and easy way of mapping FSRs to spatial tally filters, but comes with
// the downside of adding the restriction that spatial tally filters must
// share boundaries with the physical geometry of the simulation (so as
// not to subdivide any FSR). It is acceptable for a spatial tally region
// to contain multiple FSRs, but not the other way around.

// TODO: In future work, it would be preferable to offer a more general
// (but perhaps slightly more expensive) option for handling arbitrary
// spatial tallies that would be allowed to subdivide FSRs.

// Besides generating the mapping structure, this function also keeps track
// of whether or not all flat source regions have been hit yet. This is
// required, as there is no guarantee that all flat source regions will
// be hit every iteration, such that in the first few iterations some FSRs
// may not yet have a known position within them to facilitate mapping to
// spatial tally filters. However, after several iterations, if all FSRs
// have been hit and have had a tally map generated, then this status will
// be passed back to the caller to alert them that this function doesn't
// need to be called for the remainder of the simulation.

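// Illustrative example (hypothetical tally layout, not from the original
// source): if tally 0 has a matching filter bin and two scores, {flux,
// fission}, the source element receives two TallyTask entries built from
// (i_tally, filter_index, score_index, score_bin), which random_ray_tally()
// later replays once per iteration.
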
void FlatSourceDomain::convert_source_regions_to_tallies()
{
  openmc::simulation::time_tallies.start();

  // Tracks if we've generated a mapping yet for all source regions.
  bool all_source_regions_mapped = true;

// Attempt to generate mapping for all source regions
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {

    // If this source region has not been hit by a ray yet, then
    // we aren't going to be able to map it, so skip it.
    if (!position_recorded_[sr]) {
      all_source_regions_mapped = false;
      continue;
    }

    // A particle located at the recorded midpoint of a ray
    // crossing through this source region is used to establish
    // the spatial location of the source region
    Particle p;
    p.r() = position_[sr];
    p.r_last() = position_[sr];
    bool found = exhaustive_find_cell(p);

    // Loop over energy groups (so as to support energy filters)
    for (int g = 0; g < negroups_; g++) {

      // Set particle to the current energy
      p.g() = g;
      p.g_last() = g;
      p.E() = data::mg.energy_bin_avg_[p.g()];
      p.E_last() = p.E();

      int64_t source_element = sr * negroups_ + g;

      // If this task has already been populated, we don't need to do
      // it again.
      if (tally_task_[source_element].size() > 0) {
        continue;
      }

      // Loop over all active tallies. This logic is essentially identical
      // to what happens when scanning for applicable tallies during
      // MC transport.
      for (auto i_tally : model::active_tallies) {
        Tally& tally {*model::tallies[i_tally]};

        // Initialize an iterator over valid filter bin combinations.
        // If there are no valid combinations, use a continue statement
        // to ensure we skip the assume_separate break below.
        auto filter_iter = FilterBinIter(tally, p);
        auto end = FilterBinIter(tally, true, &p.filter_matches());
        if (filter_iter == end)
          continue;

        // Loop over filter bins.
        for (; filter_iter != end; ++filter_iter) {
          auto filter_index = filter_iter.index_;
          auto filter_weight = filter_iter.weight_;

          // Loop over scores
          for (auto score_index = 0; score_index < tally.scores_.size();
               score_index++) {
            auto score_bin = tally.scores_[score_index];
            // If a valid tally, filter, and score combination has been found,
            // then add it to the list of tally tasks for this source element.
            tally_task_[source_element].emplace_back(
              i_tally, filter_index, score_index, score_bin);
          }
        }
      }
      // Reset all the filter matches for the next tally event.
      for (auto& match : p.filter_matches())
        match.bins_present_ = false;
    }
  }
  openmc::simulation::time_tallies.stop();

  mapped_all_tallies_ = all_source_regions_mapped;
}

// Tallying in random ray is not done directly during transport; rather,
// it is done only once after each power iteration. This is made possible
// by way of a mapping data structure that relates spatial source regions
// (FSRs) to tally/filter/score combinations. The mechanism by which the
// mapping is done (and the limitations incurred) is documented in the
// "convert_source_regions_to_tallies()" function comments above. The present
// tally function simply traverses the mapping data structure and executes
// the scoring operations to OpenMC's native tally result arrays.

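// Each reaction-type score below is volume-weighted, i.e. of the form
// (notation ours) score = phi_{i,g} * V_i * Sigma_x(g), with the cross
// section omitted for flux scores; the "events" score simply tallies unity.
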
void FlatSourceDomain::random_ray_tally() const
{
  openmc::simulation::time_tallies.start();

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

// We loop over all source regions and energy groups. For each
// element, we check if there are any scores needed and apply
// them.
#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {
    double volume = volume_[sr];
    int material = material_[sr];
    for (int g = 0; g < negroups_; g++) {
      int64_t idx = sr * negroups_ + g;
      double flux = scalar_flux_new_[idx] * volume;
      for (auto& task : tally_task_[idx]) {
        double score;
        switch (task.score_type) {

        case SCORE_FLUX:
          score = flux;
          break;

        case SCORE_TOTAL:
          score = flux * data::mg.macro_xs_[material].get_xs(
                           MgxsType::TOTAL, g, NULL, NULL, NULL, t, a);
          break;

        case SCORE_FISSION:
          score = flux * data::mg.macro_xs_[material].get_xs(
                           MgxsType::FISSION, g, NULL, NULL, NULL, t, a);
          break;

        case SCORE_NU_FISSION:
          score = flux * data::mg.macro_xs_[material].get_xs(
                           MgxsType::NU_FISSION, g, NULL, NULL, NULL, t, a);
          break;

        case SCORE_EVENTS:
          score = 1.0;
          break;

        default:
          fatal_error("Invalid score specified in tallies.xml. Only flux, "
                      "total, fission, nu-fission, and events are supported in "
                      "random ray mode.");
          break;
        }
        Tally& tally {*model::tallies[task.tally_idx]};
#pragma omp atomic
        tally.results_(task.filter_idx, task.score_idx, TallyResult::VALUE) +=
          score;
      }
    }
  }
}

void FlatSourceDomain::all_reduce_replicated_source_regions()
{
#ifdef OPENMC_MPI

  // If we only have 1 MPI rank, no need
  // to reduce anything.
  if (mpi::n_procs <= 1)
    return;

  simulation::time_bank_sendrecv.start();

  // The "position_recorded" variable needs to be allreduced (and maxed),
  // as whether or not a cell was hit will affect some decisions in how the
  // source is calculated in the next iteration so as to avoid dividing
  // by zero. We take the max rather than the sum as the hit values are
  // expected to be zero or 1.
  MPI_Allreduce(MPI_IN_PLACE, position_recorded_.data(), n_source_regions_,
    MPI_INT, MPI_MAX, mpi::intracomm);

  // The position variable is more complicated to reduce than the others,
  // as we do not want the sum of all positions in each cell; rather, we
  // want to just pick any single valid position. Thus, we perform a gather
  // and then pick the first valid position we find for all source regions
  // that have had a position recorded. This operation does not need to
  // be broadcast back to other ranks, as this value is only used for the
  // tally conversion operation, which is only performed on the master rank.
  // While this is expensive, it only needs to be done for active batches,
  // and only if we have not mapped all the tallies yet. Once tallies are
  // fully mapped, then the position vector is fully populated, so this
  // operation can be skipped.

  // First, we broadcast the fully mapped tally status variable so that
  // all ranks are on the same page
  int mapped_all_tallies_i = static_cast<int>(mapped_all_tallies_);
  MPI_Bcast(&mapped_all_tallies_i, 1, MPI_INT, 0, mpi::intracomm);

  // Then, we perform the gather of position data, if needed
  if (simulation::current_batch > settings::n_inactive &&
      !mapped_all_tallies_i) {

    // Master rank will gather results and pick valid positions
    if (mpi::master) {
      // Initialize temporary vector for receiving positions
      vector<vector<Position>> all_position;
      all_position.resize(mpi::n_procs);
      for (int i = 0; i < mpi::n_procs; i++) {
        all_position[i].resize(n_source_regions_);
      }

      // Copy master rank data into gathered vector for convenience
      all_position[0] = position_;

      // Receive all data into gather vector
      for (int i = 1; i < mpi::n_procs; i++) {
        MPI_Recv(all_position[i].data(), n_source_regions_ * 3, MPI_DOUBLE, i,
          0, mpi::intracomm, MPI_STATUS_IGNORE);
      }

      // Scan through gathered data and pick first valid cell position
      for (int sr = 0; sr < n_source_regions_; sr++) {
        if (position_recorded_[sr] == 1) {
          for (int i = 0; i < mpi::n_procs; i++) {
            if (all_position[i][sr].x != 0.0 || all_position[i][sr].y != 0.0 ||
                all_position[i][sr].z != 0.0) {
              position_[sr] = all_position[i][sr];
              break;
            }
          }
        }
      }
    } else {
      // Other ranks just send in their data
      MPI_Send(position_.data(), n_source_regions_ * 3, MPI_DOUBLE, 0, 0,
        mpi::intracomm);
    }
  }

  // For the rest of the source region data, we simply perform an all reduce,
  // as these values will be needed on all ranks for transport during the
  // next iteration.
  MPI_Allreduce(MPI_IN_PLACE, volume_.data(), n_source_regions_, MPI_DOUBLE,
    MPI_SUM, mpi::intracomm);

  MPI_Allreduce(MPI_IN_PLACE, was_hit_.data(), n_source_regions_, MPI_INT,
    MPI_SUM, mpi::intracomm);

  MPI_Allreduce(MPI_IN_PLACE, scalar_flux_new_.data(), n_source_elements_,
    MPI_FLOAT, MPI_SUM, mpi::intracomm);

  simulation::time_bank_sendrecv.stop();
#endif
}

// Outputs all basic material, FSR ID, multigroup flux, and
// fission source data to a .vtk file that can be directly
// loaded and displayed by ParaView. Note that .vtk binary
// files require big endian byte ordering, so endianness
// is checked and flipped if necessary.
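// The voxel traversal below stores values with x varying fastest (linear
// index z*Ny*Nx + y*Nx + x), which matches the point ordering the legacy
// VTK STRUCTURED_POINTS reader expects.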
void FlatSourceDomain::output_to_vtk() const
{
  // Rename .h5 plot filename(s) to .vtk filenames
  for (int p = 0; p < model::plots.size(); p++) {
    PlottableInterface* plot = model::plots[p].get();
    plot->path_plot() =
      plot->path_plot().substr(0, plot->path_plot().find_last_of('.')) + ".vtk";
  }

  // Print header information
  print_plot();

  // Outer loop over plots
  for (int p = 0; p < model::plots.size(); p++) {

    // Get handle to OpenMC plot object and extract params
    Plot* openmc_plot = dynamic_cast<Plot*>(model::plots[p].get());

    // Random ray plots only support voxel plots
    if (!openmc_plot || openmc_plot->type_ != Plot::PlotType::voxel) {
      warning(fmt::format("Plot {} is an invalid plot type -- only voxel "
                          "plotting is allowed in random ray mode.",
        p));
      continue;
    }

    int Nx = openmc_plot->pixels_[0];
    int Ny = openmc_plot->pixels_[1];
    int Nz = openmc_plot->pixels_[2];
    Position origin = openmc_plot->origin_;
    Position width = openmc_plot->width_;
    Position ll = origin - width / 2.0;
    double x_delta = width.x / Nx;
    double y_delta = width.y / Ny;
    double z_delta = width.z / Nz;
    std::string filename = openmc_plot->path_plot();

    // Perform sanity checks on file size
    uint64_t bytes = Nx * Ny * Nz * (negroups_ + 1 + 1 + 1) * sizeof(float);
    write_message(5, "Processing plot {}: {}... (Estimated size is {} MB)",
      openmc_plot->id(), filename, bytes / 1.0e6);
    if (bytes / 1.0e9 > 100.0) {
      fatal_error("Voxel plot specification is too large (>100 GB). Exiting.");
    } else if (bytes / 1.0e9 > 1.0) {
      warning("Voxel plot specification is very large (>1 GB). Plotting may be "
              "slow.");
    }

    // Relate voxel spatial locations to random ray source regions
    vector<int> voxel_indices(Nx * Ny * Nz);

#pragma omp parallel for collapse(3)
    for (int z = 0; z < Nz; z++) {
      for (int y = 0; y < Ny; y++) {
        for (int x = 0; x < Nx; x++) {
          Position sample;
          sample.z = ll.z + z_delta / 2.0 + z * z_delta;
          sample.y = ll.y + y_delta / 2.0 + y * y_delta;
          sample.x = ll.x + x_delta / 2.0 + x * x_delta;
          Particle p;
          p.r() = sample;
          bool found = exhaustive_find_cell(p);
          int i_cell = p.lowest_coord().cell;
          int64_t source_region_idx =
            source_region_offsets_[i_cell] + p.cell_instance();
          voxel_indices[z * Ny * Nx + y * Nx + x] = source_region_idx;
        }
      }
    }

    // Open file for writing
    std::FILE* plot = std::fopen(filename.c_str(), "wb");

    // Write vtk metadata
    std::fprintf(plot, "# vtk DataFile Version 2.0\n");
    std::fprintf(plot, "Dataset File\n");
    std::fprintf(plot, "BINARY\n");
    std::fprintf(plot, "DATASET STRUCTURED_POINTS\n");
    std::fprintf(plot, "DIMENSIONS %d %d %d\n", Nx, Ny, Nz);
    std::fprintf(plot, "ORIGIN 0 0 0\n");
    std::fprintf(plot, "SPACING %lf %lf %lf\n", x_delta, y_delta, z_delta);
    std::fprintf(plot, "POINT_DATA %d\n", Nx * Ny * Nz);

    // Plot multigroup flux data
    for (int g = 0; g < negroups_; g++) {
      std::fprintf(plot, "SCALARS flux_group_%d float\n", g);
      std::fprintf(plot, "LOOKUP_TABLE default\n");
      for (int fsr : voxel_indices) {
        int64_t source_element = fsr * negroups_ + g;
        float flux = scalar_flux_final_[source_element];
        flux /= (settings::n_batches - settings::n_inactive);
        flux = convert_to_big_endian<float>(flux);
        std::fwrite(&flux, sizeof(float), 1, plot);
      }
    }

    // Plot FSRs
    std::fprintf(plot, "SCALARS FSRs float\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int fsr : voxel_indices) {
      float value = future_prn(10, fsr);
      value = convert_to_big_endian<float>(value);
      std::fwrite(&value, sizeof(float), 1, plot);
    }

    // Plot Materials
    std::fprintf(plot, "SCALARS Materials int\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int fsr : voxel_indices) {
      int mat = material_[fsr];
      mat = convert_to_big_endian<int>(mat);
      std::fwrite(&mat, sizeof(int), 1, plot);
    }

    // Plot fission source
    std::fprintf(plot, "SCALARS total_fission_source float\n");
    std::fprintf(plot, "LOOKUP_TABLE default\n");
    for (int fsr : voxel_indices) {
      float total_fission = 0.0;
      int mat = material_[fsr];
      for (int g = 0; g < negroups_; g++) {
        int64_t source_element = fsr * negroups_ + g;
        float flux = scalar_flux_final_[source_element];
        flux /= (settings::n_batches - settings::n_inactive);
        float Sigma_f = data::mg.macro_xs_[mat].get_xs(
          MgxsType::FISSION, g, nullptr, nullptr, nullptr, 0, 0);
        total_fission += Sigma_f * flux;
      }
      total_fission = convert_to_big_endian<float>(total_fission);
      std::fwrite(&total_fission, sizeof(float), 1, plot);
    }

    std::fclose(plot);
  }
}

} // namespace openmc