
openmc-dev / openmc / 21031170065

15 Jan 2026 12:23PM UTC coverage: 80.796% (-1.2%) from 82.044%

Pull Request #3732: Volume Calculation enhancement including refactoring and real-valued scoring implementation (merge 6dea7cf60 into 179048b80)

15979 of 22241 branches covered (71.84%); branch coverage is included in the aggregate %.
230 of 256 new or added lines in 2 files covered (89.84%).
1019 existing lines in 51 files are now uncovered.
53676 of 63970 relevant lines covered (83.91%).
13908816.79 hits per line.

Source file: /src/volume_calc.cpp (88.7% covered)
#include "openmc/volume_calc.h"

#include "openmc/capi.h"
#include "openmc/cell.h"
#include "openmc/constants.h"
#include "openmc/distribution_multi.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/hdf5_interface.h"
#include "openmc/material.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/openmp_interface.h"
#include "openmc/output.h"
#include "openmc/random_dist.h"
#include "openmc/random_lcg.h"
#include "openmc/settings.h"
#include "openmc/timer.h"
#include "openmc/xml_interface.h"

#include "xtensor/xadapt.hpp"
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include <algorithm> // for copy
#include <cmath>     // for pow, sqrt
#include <unordered_set>

namespace openmc {

//==============================================================================
// Global variables
//==============================================================================

namespace model {
vector<VolumeCalculation> volume_calcs;
} // namespace model

#ifdef OPENMC_MPI
static MPI_Datatype mpi_vol_results;  //!< MPI struct for CalcResults
static MPI_Datatype mpi_volume_tally; //!< MPI struct for VolTally
#endif

//==============================================================================
// VolumeCalculation implementation
//==============================================================================

VolumeCalculation::VolumeCalculation(pugi::xml_node node)
{
  // Read domain type (cell, material or universe)
  std::string domain_type = get_node_value(node, "domain_type");
  if (domain_type == "cell") {
    domain_type_ = TallyDomain::CELL;
  } else if (domain_type == "material") {
    domain_type_ = TallyDomain::MATERIAL;
  } else if (domain_type == "universe") {
    domain_type_ = TallyDomain::UNIVERSE;
  } else {
    fatal_error(std::string("Unrecognized domain type for stochastic "
                            "volume calculation: " +
                            domain_type));
  }

  // Read domain IDs, bounding coordinates and number of samples
  domain_ids_ = get_node_array<int>(node, "domain_ids");
  lower_left_ = get_node_array<double>(node, "lower_left");
  upper_right_ = get_node_array<double>(node, "upper_right");
  n_samples_ = std::stoull(get_node_value(node, "samples"));

  // Determine volume of bounding box
  const Position d {upper_right_ - lower_left_};
  volume_sample_ = d.x * d.y * d.z;

  if (check_for_node(node, "threshold")) {
    pugi::xml_node threshold_node = node.child("threshold");

    threshold_ = std::stod(get_node_value(threshold_node, "threshold"));
    if (threshold_ <= 0.0) {
      fatal_error(fmt::format("Invalid error threshold {} provided for a "
                              "volume calculation.",
        threshold_));
    }

    // Precompute the trigger values t', expressed in volume-fraction units,
    // from the user-supplied volume-based threshold t, to avoid excessive
    // repeated computation during the Monte Carlo run. The trigger
    // conditions are evaluated from the sample mean \bar{x} and the adjusted
    // sample variance s^2.
    std::string tmp = get_node_value(threshold_node, "type");
    if (tmp == "variance") {
      trigger_type_ = TriggerMetric::variance;
      // Condition: s^2 / N < t'
      // t' = t / (V_b)^2, in squared volume fraction units
      threshold_cnd_ = threshold_ / std::pow(volume_sample_, 2);
    } else if (tmp == "std_dev") {
      trigger_type_ = TriggerMetric::standard_deviation;
      // Condition: s^2 / N < t'
      // t' = (t / V_b)^2, in squared volume fraction units
      threshold_cnd_ = std::pow(threshold_ / volume_sample_, 2);
    } else if (tmp == "rel_err") {
      trigger_type_ = TriggerMetric::relative_error;
      // Condition: (s^2 / \bar{x}^2) / N < t'
      // t' = t^2, in relative units
      threshold_cnd_ = threshold_ * threshold_;
    } else {
      fatal_error(fmt::format(
        "Invalid volume calculation trigger type '{}' provided.", tmp));
    }

    if (check_for_node(threshold_node, "max_iterations")) {
      max_iterations_ =
        std::stoi(get_node_value(threshold_node, "max_iterations"));
      if (max_iterations_ <= 0) {
        fatal_error(fmt::format(
          "Invalid max_iterations {} provided for a volume calculation.",
          max_iterations_));
      }
    }
  }

  // Ensure there are no duplicates by copying elements to a set and then
  // comparing the length with the original vector
  std::unordered_set<int> unique_ids(domain_ids_.cbegin(), domain_ids_.cend());
  if (unique_ids.size() != domain_ids_.size()) {
    throw std::runtime_error {"Domain IDs for a volume calculation "
                              "must be unique."};
  }
}

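// A minimal sketch of the <volume_calc> input element this constructor
// expects, inferred from the get_node_value/get_node_array calls above (the
// surrounding file layout is an assumption):
//
//   <volume_calc>
//     <domain_type>material</domain_type>
//     <domain_ids>1 2 3</domain_ids>
//     <lower_left>-10 -10 -10</lower_left>
//     <upper_right>10 10 10</upper_right>
//     <samples>1000000</samples>
//     <threshold>
//       <type>rel_err</type>
//       <threshold>0.01</threshold>
//       <max_iterations>100</max_iterations>
//     </threshold>
//   </volume_calc>
//
// For this example the bounding box volume is V_b = 20^3 = 8000 cm^3 and the
// precomputed trigger value is threshold_cnd_ = t^2 = 1e-4.
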
void VolumeCalculation::execute(CalcResults& master_results) const
{
  // Check to make sure domain IDs are valid
  for (auto uid : domain_ids_) {
    switch (domain_type_) {
    case TallyDomain::CELL:
      if (model::cell_map.find(uid) == model::cell_map.end()) {
        throw std::runtime_error {fmt::format(
          "Cell {} in volume calculation does not exist in geometry.", uid)};
      }
      break;
    case TallyDomain::MATERIAL:
      if (model::material_map.find(uid) == model::material_map.end()) {
        throw std::runtime_error {fmt::format(
          "Material {} in volume calculation does not exist in geometry.",
          uid)};
      }
      break;
    case TallyDomain::UNIVERSE:
      if (model::universe_map.find(uid) == model::universe_map.end()) {
        throw std::runtime_error {fmt::format(
          "Universe {} in volume calculation does not exist in geometry.",
          uid)};
      }
    }
  }

  // Shared data that is collected from all threads
  const int n_domains = domain_ids_.size();

  // Divide work over MPI processes
  uint64_t min_samples = n_samples_ / mpi::n_procs;
  uint64_t remainder = n_samples_ % mpi::n_procs;
  uint64_t i_start, i_end;
  if (mpi::rank < remainder) {
    i_start = (min_samples + 1) * mpi::rank;
    i_end = i_start + min_samples + 1;
  } else {
    i_start =
      (min_samples + 1) * remainder + (mpi::rank - remainder) * min_samples;
    i_end = i_start + min_samples;
  }

  while (true) {

#pragma omp parallel
    {
      // Temporary variables that are private to each thread
      CalcResults results(*this);
      results.sampling_time.start();
      Particle p;

      // Sample locations and count scores
#pragma omp for
      for (size_t i = i_start; i < i_end; i++) {
        uint64_t id = master_results.iterations * n_samples_ + i;
        uint64_t seed = init_seed(id, STREAM_VOLUME);

        p.n_coord() = 1;
        p.r() = {uniform_distribution(lower_left_.x, upper_right_.x, &seed),
          uniform_distribution(lower_left_.y, upper_right_.y, &seed),
          uniform_distribution(lower_left_.z, upper_right_.z, &seed)};
        constexpr double sqrt3_1 = 1. / std::sqrt(3.);
        p.u() = {sqrt3_1, sqrt3_1, sqrt3_1};

        // TO REVIEWER: THE SWITCH IS TRANSFERRED TO score_hit()
        if (exhaustive_find_cell(p))
          this->score_hit(p, results);

        results.n_samples++;

        // Finalizing every tally after each sample can be inefficient when
        // the number of domains is large and the batch size is small, but
        // the batch size is currently fixed at 1 to keep the input format
        // unchanged
        const double batch_size_1 = 1. / static_cast<double>(1);
        for (auto& vt : results.vol_tallies) {
          for (auto& vol_tally : vt) {
            vol_tally.finalize_batch(batch_size_1);
          }
        }
      } // sample/batch loop

      results.sampling_time.stop();
      results.cost = results.sampling_time.elapsed();

      // At this point, each thread has its own volume tallies lists and we
      // now need to reduce them. OpenMP is not nearly smart enough to do
      // this on its own, so we have to manually reduce them
      reduce_results(results, master_results);
    } // omp parallel

    // bump iteration counter
    master_results.iterations++;

    // TO REVIEWER: THE MPI-RELATED PART IS MOVED TO collect_MPI()
#ifdef OPENMC_MPI
    master_results.collect_MPI(); // collect results to master process
#endif
    // Evaluate the volume estimation results on the master process to
    // determine the trigger state
    bool stop_calc =
      mpi::master && (trigger_type_ == TriggerMetric::not_active ||
                       master_results.iterations == max_iterations_);

    if (!stop_calc) {
      // Compute current trigger state across totals (0th elements) only
      for (const auto& vt : master_results.vol_tallies) {
        stop_calc = vt[0].trigger_state(
          trigger_type_, threshold_cnd_, master_results.n_samples);
        if (!stop_calc)
          break;
      }
    }

#ifdef OPENMC_MPI
    // Broadcast the continuation state just determined on the master
    // process to all processes
    MPI_Bcast(&stop_calc, 1, MPI_CXX_BOOL, 0, mpi::intracomm);
#endif

    if (!stop_calc)
      continue; // while loop
    // No trigger is applied or the trigger condition is satisfied, so we're
    // done

    if (mpi::master) {
      // Normalize all results by the bounding box volume and compute the
      // standard deviation
      for (auto& vt : master_results.vol_tallies) {
        for (auto& vol_tally : vt) {
          vol_tally.score_acc = get_tally_results(
            master_results.n_samples, volume_sample_, vol_tally);
        }
      }

      // Compute nuclides
      for (int i_domain = 0; i_domain < n_domains; ++i_domain) {
        // Create 2D array to store atoms/uncertainty for each nuclide. Later,
        // this is compressed into vectors storing only those nuclides that are
        // non-zero
        auto n_nuc =
          settings::run_CE ? data::nuclides.size() : data::mg.nuclides_.size();
        xt::xtensor<double, 2> atoms({n_nuc, 2}, 0.0);

        for (int j = 0; j < master_results.vol_tallies[i_domain].size(); ++j) {
          const int i_material = master_results.vol_tallies[i_domain][j].index;
          if (i_material == MATERIAL_VOID || i_material == _INDEX_TOTAL)
            continue;

          const auto& mat = model::materials[i_material];
          for (int k = 0; k < mat->nuclide_.size(); ++k) {
            auto& volume = master_results.vol_tallies[i_domain][j].score_acc;
            // Collect calculated nuclide amounts N [atoms] and stddev as
            // N = V [cm^3] * \rho [atoms/b-cm] * 1.e24 [b-cm/cm^3]
            atoms(mat->nuclide_[k], 0) +=
              volume[0] * mat->atom_density(k) * 1.0e24;
            atoms(mat->nuclide_[k], 1) +=
              volume[1] * mat->atom_density(k) * 1.0e24;
          }
        }

        // Get reference to result for this domain
        auto& result {master_results.nuc_results[i_domain]};
        // Convert full arrays to vectors
        for (int j = 0; j < n_nuc; ++j) {
          if (atoms(j, 0) > 0.0) {
            result.nuclides.push_back(j);
            result.atoms.push_back(atoms(j, 0));
            result.uncertainty.push_back(atoms(j, 1));
          }
        }
      } // end domains loop
    }

    return;

  } // end while
}

void VolumeCalculation::show_vol_stat(
  const std::string label, const std::string units, const double value) const
{
  fmt::print("{0:<20} = {2:10.4e} {1:<}\n", label, units, value);
}

void VolumeCalculation::show_volume(const std::string domain_type,
  const int domain_id, const std::string region_name, const double mean,
  const double stddev) const
{
  fmt::print(" {0:>9}{1:>6}: {2:10.4e} +/- {3:10.4e} cm^3", domain_type,
    domain_id, mean, stddev);
  if (!region_name.empty()) {
    fmt::print("   //{:<}", region_name);
  }
  fmt::print("\n");
}

// TO REVIEWER: I/O BLOCK BEGINS; I WOULD PREFER TO PUT IT AT THE END OF THE
// FILE BUT KEPT THE FILE STRUCTURE UNCHANGED
void VolumeCalculation::show_results(const CalcResults& results) const
{
  // Show tracing statistics
  write_message(5, " ");
  show_vol_stat(
    "Total sample size", "", static_cast<double>(results.n_samples));
  show_vol_stat("Running cost", "thread-sec", results.cost);
  show_vol_stat("Cost of hitting", "thread-sec/hit",
    static_cast<double>(results.cost) / static_cast<double>(results.n_samples));
  write_message(5, " ");

  std::string domain_type;
  if (domain_type_ == TallyDomain::CELL) {
    domain_type = "Cell";
  } else if (domain_type_ == TallyDomain::MATERIAL) {
    domain_type = "Material";
  } else {
    domain_type = "Universe";
  }

  // Display domain volumes
  for (int j = 0; j < domain_ids_.size(); j++) {
    std::string region_name {""};
    if (domain_type_ == TallyDomain::CELL) {
      int cell_idx = model::cell_map[domain_ids_[j]];
      region_name = model::cells[cell_idx]->name();
    } else if (domain_type_ == TallyDomain::MATERIAL) {
      int mat_idx = model::material_map[domain_ids_[j]];
      region_name = model::materials[mat_idx]->name();
    }
    if (region_name.size())
      region_name.insert(0, " "); // prepend space for formatting

    show_volume(domain_type, domain_ids_[j], region_name,
      results.vol_tallies[j][0].score_acc[0],
      results.vol_tallies[j][0].score_acc[1]);
  }
  write_message(4, " "); // Blank line after results printed
}

void VolumeCalculation::to_hdf5(
  const std::string& filename, const CalcResults& results) const
{
  // Create HDF5 file
  hid_t file_id = file_open(filename, 'w');

  // Write header info
  write_attribute(file_id, "filetype", "volume");
  write_attribute(file_id, "version", VERSION_VOLUME);
  write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
  write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

  // Write current date and time
  write_attribute(file_id, "date_and_time", time_stamp());

  // Write basic metadata
  write_attribute(file_id, "samples", n_samples_);
  write_attribute(file_id, "lower_left", lower_left_);
  write_attribute(file_id, "upper_right", upper_right_);
  // Write trigger info
  if (trigger_type_ != TriggerMetric::not_active) {
    write_attribute(file_id, "iterations", results.iterations);
    write_attribute(file_id, "threshold", threshold_);
    std::string trigger_str;
    switch (trigger_type_) {
    case TriggerMetric::variance:
      trigger_str = "variance";
      break;
    case TriggerMetric::standard_deviation:
      trigger_str = "std_dev";
      break;
    case TriggerMetric::relative_error:
      trigger_str = "rel_err";
      break;
    default:
      break;
    }
    write_attribute(file_id, "trigger_type", trigger_str);
    // Only write max_iterations if it differs from its default value
    if (max_iterations_ !=
        std::numeric_limits<decltype(max_iterations_)>::max())
      write_attribute(file_id, "max_iterations", max_iterations_);
  } else {
    write_attribute(file_id, "iterations", 1);
  }

  if (domain_type_ == TallyDomain::CELL) {
    write_attribute(file_id, "domain_type", "cell");
  } else if (domain_type_ == TallyDomain::MATERIAL) {
    write_attribute(file_id, "domain_type", "material");
  } else if (domain_type_ == TallyDomain::UNIVERSE) {
    write_attribute(file_id, "domain_type", "universe");
  }

  for (int i = 0; i < domain_ids_.size(); ++i) {
    hid_t group_id =
      create_group(file_id, fmt::format("domain_{}", domain_ids_[i]));

    // Write volume for domain
    const auto& result {results.nuc_results[i]};
    write_dataset(group_id, "volume", results.vol_tallies[i][0].score_acc);

    // Create array of nuclide names from the vector
    auto n_nuc = result.nuclides.size();

    vector<std::string> nucnames;
    for (int i_nuc : result.nuclides) {
      nucnames.push_back(settings::run_CE ? data::nuclides[i_nuc]->name_
                                          : data::mg.nuclides_[i_nuc].name);
    }

    // Create array of total # of atoms with uncertainty for each nuclide
    xt::xtensor<double, 2> atom_data({n_nuc, 2});
    xt::view(atom_data, xt::all(), 0) = xt::adapt(result.atoms);
    xt::view(atom_data, xt::all(), 1) = xt::adapt(result.uncertainty);

    // Write results
    write_dataset(group_id, "nuclides", nucnames);
    write_dataset(group_id, "atoms", atom_data);

    close_group(group_id);
  }

  file_close(file_id);
}

// TO REVIEWER: THIS IS THE SAME check_hit()
void VolumeCalculation::check_hit(const int32_t i_material,
  const double contrib, vector<VolTally>& vol_tallies) const
{
  // Contribute to entire domain result tally
  vol_tallies[0].score += contrib;

  // Check if this material was previously hit and if so, contribute score
  for (int j = 1; j < vol_tallies.size(); j++) {
    if (vol_tallies[j].index == i_material) {
      vol_tallies[j].score += contrib;
      return;
    }
  }

  // The material was not previously hit; append an entry to the material
  // indices and hits lists
  vol_tallies.push_back(VolTally(i_material, contrib));
}

// TO REVIEWER: THIS IS AN EQUIVALENT OF THE reduce_indicies_hits() TEMPLATE
// FROM volume_calc.h
void VolumeCalculation::reduce_results(
  const CalcResults& local_results, CalcResults& results) const
{
  auto n_threads = num_threads();

  // Collect scalar variables
#pragma omp for ordered schedule(static, 1)
  for (int i = 0; i < n_threads; ++i) {
#pragma omp ordered
    {
      results.n_samples += local_results.n_samples;
      results.cost += local_results.cost;
    }
  }

  // Collect vectored domain-wise results
  for (int i_domain = 0; i_domain < domain_ids_.size(); ++i_domain) {
    const vector<VolTally>& local_vol_tall =
      local_results.vol_tallies[i_domain];
    vector<VolTally>& vol_tall = results.vol_tallies[i_domain];

#pragma omp for ordered schedule(static, 1)
    for (int i = 0; i < n_threads; ++i) {
#pragma omp ordered
      for (int j = 0; j < local_vol_tall.size(); ++j) {
        // Check if this material has been added to the master list and if
        // so, accumulate scores
        const auto ind {local_vol_tall[j].index};
        const auto it = std::find_if(vol_tall.begin(), vol_tall.end(),
          [ind](const VolTally& vt) { return vt.index == ind; });
        if (it == vol_tall.end()) {
          vol_tall.push_back(local_vol_tall[j]);
        } else {
          it->append_tally(local_vol_tall[j]);
        }
      }
    }
  }
}

#ifdef OPENMC_MPI
void VolumeCalculation::initialize_MPI_struct() const
{
  // This code is a slightly modified replica of initialize_mpi() from
  // initialize.cpp
  CalcResults cr(*this);
  MPI_Aint cr_disp[1], cr_d;
  MPI_Get_address(&cr, &cr_d);
  MPI_Get_address(&cr.cost, &cr_disp[0]);
  for (int i = 0; i < 1; i++) {
    cr_disp[i] -= cr_d;
  }

  int cr_blocks[] {1};
  MPI_Datatype cr_types[] {MPI_DOUBLE};
  MPI_Type_create_struct(1, cr_blocks, cr_disp, cr_types, &mpi_vol_results);
  MPI_Type_commit(&mpi_vol_results);

  VolTally vt;
  MPI_Aint vt_disp[3], vt_d;
  MPI_Get_address(&vt, &vt_d);
  MPI_Get_address(&vt.score, &vt_disp[0]);
  MPI_Get_address(&vt.score_acc, &vt_disp[1]);
  MPI_Get_address(&vt.index, &vt_disp[2]);
  for (int i = 0; i < 3; i++) {
    vt_disp[i] -= vt_d;
  }

  // One type per block: score (1 double), score_acc (2 doubles), index (int)
  int vt_blocks[] {1, 2, 1};
  MPI_Datatype vt_types[] {MPI_DOUBLE, MPI_DOUBLE, MPI_INT};
  MPI_Type_create_struct(3, vt_blocks, vt_disp, vt_types, &mpi_volume_tally);
  MPI_Type_commit(&mpi_volume_tally);
}

void VolumeCalculation::delete_MPI_struct() const
{
  MPI_Type_free(&mpi_vol_results);
  MPI_Type_free(&mpi_volume_tally);
}

// TO REVIEWER: THIS IS AN EQUIVALENT OF THE RELATED MPI-INTERCHANGE BLOCK OF
// execute()
void VolumeCalculation::CalcResults::collect_MPI()
{
  vector<int> domain_sizes(vol_tallies.size());

  if (!mpi::master) {
    // A total of n_domains + 2 MPI messages is sent to the master process

    // Determine a unique tag for each MPI message (as below)
    int mpi_offset = mpi::rank * (vol_tallies.size() + 2) + 2;

    // Pass root data of the struct
    MPI_Send(
      (void*)this, 1, mpi_vol_results, 0, mpi_offset - 2, mpi::intracomm);

    // Pass sizes of domain-wise data
    for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
      domain_sizes[i_domain] = vol_tallies[i_domain].size();
    }

    MPI_Send(domain_sizes.data(), domain_sizes.size(), MPI_INT, 0,
      mpi_offset - 1, mpi::intracomm);

    // Pass domain-wise data of struct
    for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
      MPI_Send(vol_tallies[i_domain].data(), domain_sizes[i_domain],
        mpi_volume_tally, 0, mpi_offset + i_domain, mpi::intracomm);
    }

    this->reset(); // Delete the data just passed to the master process

  } else {

    // A total of n_domains + 2 MPI messages is received from each
    // non-master process

    for (int i_proc = 1; i_proc < mpi::n_procs; i_proc++) {

      CalcResults res_buff(*this); // temporary storage for received data

      // Determine a unique tag for each MPI message (as above)
      int mpi_offset = i_proc * (vol_tallies.size() + 2) + 2;

      // Receive root data of struct
      MPI_Recv(&res_buff, 1, mpi_vol_results, i_proc, mpi_offset - 2,
        mpi::intracomm, MPI_STATUS_IGNORE);

      // Receive sizes of domain-wise data
      MPI_Recv(domain_sizes.data(), domain_sizes.size(), MPI_INT, i_proc,
        mpi_offset - 1, mpi::intracomm, MPI_STATUS_IGNORE);

      // Receive domain-wise data of struct
      for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
        res_buff.vol_tallies[i_domain].resize(domain_sizes[i_domain]);
        MPI_Recv(res_buff.vol_tallies[i_domain].data(), domain_sizes[i_domain],
          mpi_volume_tally, i_proc, mpi_offset + i_domain, mpi::intracomm,
          MPI_STATUS_IGNORE);
      }

      this->append(res_buff);
    }
  }
}
#endif

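// Worked example of the tag scheme above, assuming 3 domains (so
// vol_tallies.size() == 3): rank 1 uses mpi_offset = 1 * (3 + 2) + 2 = 7 and
// sends with tags 5 (root data), 6 (sizes), and 7..9 (one per domain), while
// rank 2 uses mpi_offset = 12 and tags 10, 11, and 12..14, so every message
// from a given rank carries a distinct tag.
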
// TO REVIEWER: STATISTICS AND TALLY MANIPULATION BLOCK BEGINS
VolumeCalculation::CalcResults::CalcResults(const VolumeCalculation& vol_calc)
{
  n_samples = 0;
  iterations = 0;
  cost = 0.;
  for (int i = 0; i < vol_calc.domain_ids_.size(); i++) {
    vector<VolTally> vt_vect; // Tally group for a domain
    vt_vect.push_back(
      VolTally(_INDEX_TOTAL)); // Zero-element tally for entire domain totals
    vol_tallies.push_back(vt_vect);
    nuc_results.push_back(NuclResult());
  }
}

void VolumeCalculation::CalcResults::reset()
{
  n_samples = 0;
  cost = 0.;
  for (auto& vt : vol_tallies) {
    std::fill(vt.begin(), vt.end(), VolTally());
  }
}

// TO REVIEWER: THIS IS A PART OF THE MPI-INTERCHANGE-RELATED BLOCK OF
// execute()
void VolumeCalculation::CalcResults::append(const CalcResults& other)
{
  n_samples += other.n_samples;
  cost += other.cost;

  // The domain-wise vectors this->vol_tallies and other.vol_tallies conform
  // to each other by definition
  for (auto id = 0; id < vol_tallies.size(); id++) {
    // Merge the current domain vector from other.vol_tallies into this one
    // via pair-wise comparisons
    for (const auto& vt_other : other.vol_tallies[id]) {
      bool already_appended = false;
      for (auto& vt : vol_tallies[id]) {
        if (vt.index == vt_other.index) {
          vt.append_tally(vt_other);
          already_appended = true;
          break;
        }
      }
      if (!already_appended)
        vol_tallies[id].push_back(vt_other);
    }
  }
}

inline VolumeCalculation::VolTally::VolTally(const int i_material,
  const double contrib, const double score_acc_, const double score2_acc_)
{
  score = contrib;
  score_acc[0] = score_acc_;
  score_acc[1] = score2_acc_;
  index = i_material;
}

inline void VolumeCalculation::VolTally::finalize_batch(
  const double batch_size_1)
{
  if (score != 0.) {
    score *= batch_size_1;
    score_acc[0] += score;
    score_acc[1] += score * score;
    score = 0.;
  }
}

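// Worked example of the accumulators above: with the current batch size of 1
// (batch_size_1 == 1) a sample either misses the domain (score stays 0) or
// contributes a batch score x_i, so after N samples score_acc[0] holds the
// sum of the x_i and score_acc[1] holds the sum of the x_i^2. These two
// running sums are all that trigger_state() and get_tally_results() below
// need to form the sample mean and the adjusted sample variance.
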
inline void VolumeCalculation::VolTally::assign_tally(const VolTally& vol_tally)
{
  score = vol_tally.score;
  score_acc = vol_tally.score_acc;
  index = vol_tally.index;
}

inline void VolumeCalculation::VolTally::append_tally(const VolTally& vol_tally)
{
  score += vol_tally.score;
  score_acc[0] += vol_tally.score_acc[0];
  score_acc[1] += vol_tally.score_acc[1];
}

// TO REVIEWER: IF THIS APPROACH IS FOUND TO BE OBFUSCATED, IT CAN BE ADJUSTED
// TO A LITERAL TRANSLATION OF THE FORMULAE, WITH REPEATED CALCULATIONS OF
// THRESHOLD SQUARE ROOTS AND DIVISIONS FOR EACH VOLUME DOMAIN AT THE END OF
// EACH BATCH
inline bool VolumeCalculation::VolTally::trigger_state(
  const TriggerMetric trigger_type, const double threshold,
  const size_t& n_samples) const
{
  // With sample contributions to the volume fraction limited by 1, the
  // largest n_samples the double arithmetic below can handle is around
  // 1.e102, far above the size_t limit of ~1.8e19
  const double ns1 = static_cast<double>(n_samples - 1);
  const double ns = static_cast<double>(n_samples);
  const double mean_xi_sq = score_acc[0] * score_acc[0];

  // Adjusted sample variance: s^2 = N * (\bar{x^2} - \bar{x}^2) / (N-1)
  // \bar{x}^2 = mean_xi_sq / N^2, \bar{x^2} = score_acc[1] / N, N = ns
  switch (trigger_type) {
  case TriggerMetric::variance:
  case TriggerMetric::standard_deviation:
    // Condition: s^2 / N < t'
    // Equivalent implementation:
    // N^2 * (\bar{x^2} - \bar{x}^2) < t' * (N-1) * N^2
    return score_acc[1] * ns - mean_xi_sq < threshold * ns1 * ns * ns;
  case TriggerMetric::relative_error:
    // Condition: (s^2 / \bar{x}^2) / N < t'
    // Equivalent implementation:
    // N^2 * (\bar{x^2} - \bar{x}^2) < t' * (N-1) * (N * \bar{x})^2
    return score_acc[1] * ns - mean_xi_sq < threshold * ns1 * mean_xi_sq;
  default:
    return true;
  }
}

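// Worked example of the rel_err branch, assuming N = 4 samples with hit
// scores x_i = {1, 1, 0, 0}: score_acc[0] = 2, score_acc[1] = 2, so the left
// side is score_acc[1] * ns - mean_xi_sq = 2*4 - 4 = 4 and the right side is
// t' * 3 * 4 = 12 t'. The trigger therefore fires only once t' = t^2 exceeds
// 1/3, i.e. for a requested relative error t above sqrt(1/3) ~ 0.577, which
// matches the relative error of the mean for this sample.
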
array<double, 2> VolumeCalculation::get_tally_results(const size_t& n_samples,
  const double coeff_norm, const VolTally& vol_tally) const
{
  array<double, 2> volume;
  const double ns_1 = 1. / static_cast<double>(n_samples);
  volume[0] = vol_tally.score_acc[0] * ns_1;
  volume[1] = vol_tally.score_acc[1] * ns_1;
  volume[1] = std::sqrt(
    (volume[1] - volume[0] * volume[0]) / static_cast<double>(n_samples - 1));
  volume[0] *= coeff_norm;
  volume[1] *= coeff_norm;
  return volume;
}

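// Worked example for get_tally_results(), reusing the numbers above: with
// N = 4, score_acc = {2, 2}, and coeff_norm = V_b = 8000 cm^3, the mean hit
// fraction is 2/4 = 0.5, the standard deviation of the mean is
// sqrt((0.5 - 0.25)/3) ~ 0.289, and the returned estimate is
// 4000 +/- ~2309 cm^3.
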
// TO REVIEWER: THIS IS A FULL EQUIVALENT OF THE SWITCH FROM THE HISTORY LOOP
void VolumeCalculation::score_hit(const Particle& p, CalcResults& results) const
{
  const auto n_domains = domain_ids_.size();
  const auto id_mat = p.material();
  const auto score = 1.; // Floating-point score value

  switch (domain_type_) {
  case VolumeCalculation::TallyDomain::MATERIAL:
    if (id_mat != MATERIAL_VOID) {
      for (auto i_domain = 0; i_domain < n_domains; i_domain++) {
        if (model::materials[id_mat]->id_ == domain_ids_[i_domain]) {
          this->check_hit(id_mat, score, results.vol_tallies[i_domain]);
          break;
        }
      }
    }
    break;
  case VolumeCalculation::TallyDomain::CELL:
    for (auto level = 0; level < p.n_coord_last(); ++level) {
      for (auto i_domain = 0; i_domain < n_domains; i_domain++) {
        if (model::cells[p.coord(level).cell()]->id_ == domain_ids_[i_domain]) {
          this->check_hit(id_mat, score, results.vol_tallies[i_domain]);
          break;
        }
      }
    }
    break;
  case VolumeCalculation::TallyDomain::UNIVERSE:
    for (auto level = 0; level < p.n_coord_last(); ++level) {
      for (auto i_domain = 0; i_domain < n_domains; ++i_domain) {
        if (model::universes[model::cells[p.coord(level).cell()]->universe_]
              ->id_ == domain_ids_[i_domain]) {
          this->check_hit(id_mat, score, results.vol_tallies[i_domain]);
          break;
        }
      }
    }
  }
}

void free_memory_volume()
{
  openmc::model::volume_calcs.clear();
}

} // namespace openmc

//==============================================================================
// OPENMC_CALCULATE_VOLUMES runs each of the stochastic volume calculations
// that the user has specified and writes results to HDF5 files
//==============================================================================

int openmc_calculate_volumes()
{
  using namespace openmc;

  if (mpi::master) {
    header("STOCHASTIC VOLUME CALCULATION", 3);
  }
  Timer time_volume;
  time_volume.start();

  for (int i = 0; i < model::volume_calcs.size(); ++i) {
    write_message(4, "Running volume calculation {}", i + 1);

    // Run volume calculation
    const auto& vol_calc {model::volume_calcs[i]};
    VolumeCalculation::CalcResults results(vol_calc);

#ifdef OPENMC_MPI
    vol_calc.initialize_MPI_struct();
#endif

    try {
      vol_calc.execute(results);
    } catch (const std::exception& e) {
      set_errmsg(e.what());
      return OPENMC_E_UNASSIGNED;
    }

#ifdef OPENMC_MPI
    vol_calc.delete_MPI_struct();
#endif

    if (mpi::master) {

      // Output volume calculation results and statistics
      vol_calc.show_results(results);

      // Write volumes to HDF5 file
      std::string filename =
        fmt::format("{}volume_{}.h5", settings::path_output, i + 1);
      vol_calc.to_hdf5(filename, results);
    }
  }

  // Show elapsed time
  time_volume.stop();
  write_message(6, "Elapsed time: {} sec", time_volume.elapsed());

  return 0;
}
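
A minimal sketch of driving this routine through the C API, assuming a standard OpenMC build where openmc_init, openmc_calculate_volumes, and openmc_finalize are declared in openmc/capi.h; error handling is deliberately simplified:

#include "openmc/capi.h"

int main(int argc, char* argv[])
{
  // Read the XML input, including any <volume_calc> elements
  openmc_init(argc, argv, nullptr);

  // Run every configured stochastic volume calculation; each one writes a
  // volume_<n>.h5 file to the output path
  int err = openmc_calculate_volumes();

  openmc_finalize();
  return err;
}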