• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 22210404096

20 Feb 2026 03:44AM UTC coverage: 81.804% (+0.08%) from 81.721%
22210404096

Pull #3809

github

web-flow
Merge d39f3220e into 53ce1910f
Pull Request #3809: Implement tally filter for filtering by reaction

17328 of 24423 branches covered (70.95%)

Branch coverage included in aggregate %.

125 of 149 new or added lines in 11 files covered. (83.89%)

1322 existing lines in 33 files now uncovered.

57670 of 67257 relevant lines covered (85.75%)

45506622.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.37
/src/volume_calc.cpp
1
#include "openmc/volume_calc.h"
2

3
#include "openmc/capi.h"
4
#include "openmc/cell.h"
5
#include "openmc/constants.h"
6
#include "openmc/error.h"
7
#include "openmc/geometry.h"
8
#include "openmc/hdf5_interface.h"
9
#include "openmc/material.h"
10
#include "openmc/message_passing.h"
11
#include "openmc/mgxs_interface.h"
12
#include "openmc/nuclide.h"
13
#include "openmc/openmp_interface.h"
14
#include "openmc/output.h"
15
#include "openmc/random_lcg.h"
16
#include "openmc/settings.h"
17
#include "openmc/timer.h"
18
#include "openmc/xml_interface.h"
19

20
#include "openmc/tensor.h"
21
#include <fmt/core.h>
22

23
#include <algorithm> // for copy
24
#include <cmath>     // for pow, sqrt
25
#include <unordered_set>
26

27
namespace openmc {
28

29
//==============================================================================
30
// Global variables
31
//==============================================================================
32

33
namespace model {
34
vector<VolumeCalculation> volume_calcs;
35
}
36

37
//==============================================================================
38
// VolumeCalculation implementation
39
//==============================================================================
40

41
// Construct a stochastic volume calculation from its <volume_calc> XML node.
// Reads the domain type and IDs, the sampling bounding box, the number of
// samples per batch, and an optional convergence trigger. Calls fatal_error
// on malformed input; throws if domain IDs repeat.
VolumeCalculation::VolumeCalculation(pugi::xml_node node)
{
  // Domain type selects what kind of region volumes are tallied over
  const std::string dt = get_node_value(node, "domain_type");
  if (dt == "cell") {
    domain_type_ = TallyDomain::CELL;
  } else if (dt == "material") {
    domain_type_ = TallyDomain::MATERIAL;
  } else if (dt == "universe") {
    domain_type_ = TallyDomain::UNIVERSE;
  } else {
    fatal_error(
      "Unrecognized domain type for stochastic volume calculation: " + dt);
  }

  // Target domains, sampling bounding box, and samples per batch
  domain_ids_ = get_node_array<int>(node, "domain_ids");
  lower_left_ = get_node_array<double>(node, "lower_left");
  upper_right_ = get_node_array<double>(node, "upper_right");
  n_samples_ = std::stoull(get_node_value(node, "samples"));

  // Optional convergence trigger: a positive threshold plus the metric
  // (variance, standard deviation, or relative error) it applies to
  if (check_for_node(node, "threshold")) {
    pugi::xml_node trig = node.child("threshold");

    threshold_ = std::stod(get_node_value(trig, "threshold"));
    if (threshold_ <= 0.0) {
      fatal_error(fmt::format("Invalid error threshold {} provided for a "
                              "volume calculation.",
        threshold_));
    }

    const std::string metric = get_node_value(trig, "type");
    if (metric == "variance") {
      trigger_type_ = TriggerMetric::variance;
    } else if (metric == "std_dev") {
      trigger_type_ = TriggerMetric::standard_deviation;
    } else if (metric == "rel_err") {
      trigger_type_ = TriggerMetric::relative_error;
    } else {
      fatal_error(fmt::format(
        "Invalid volume calculation trigger type '{}' provided.", metric));
    }
  }

  // Reject duplicate domain IDs: a set collapses duplicates, so a size
  // mismatch with the original list means at least one ID repeats
  std::unordered_set<int> distinct(domain_ids_.cbegin(), domain_ids_.cend());
  if (distinct.size() != domain_ids_.size()) {
    throw std::runtime_error {"Domain IDs for a volume calculation "
                              "must be unique."};
  }
}
94

95
// Run the stochastic volume calculation: repeatedly sample random points in
// the bounding box, classify each point by domain (cell/material/universe),
// and convert hit fractions into volumes and nuclide totals. Iterates in
// batches of n_samples_ until the trigger metric (if any) falls below
// threshold_. Returns one Result per entry in domain_ids_; only results on
// the MPI master rank are populated. Throws std::runtime_error for unknown
// domain IDs.
vector<VolumeCalculation::Result> VolumeCalculation::execute() const
{
  // Check to make sure domain IDs are valid
  for (auto uid : domain_ids_) {
    switch (domain_type_) {
    case TallyDomain::CELL:
      if (model::cell_map.find(uid) == model::cell_map.end()) {
        throw std::runtime_error {fmt::format(
          "Cell {} in volume calculation does not exist in geometry.", uid)};
      }
      break;
    case TallyDomain::MATERIAL:
      if (model::material_map.find(uid) == model::material_map.end()) {
        throw std::runtime_error {fmt::format(
          "Material {} in volume calculation does not exist in geometry.",
          uid)};
      }
      break;
    case TallyDomain::UNIVERSE:
      if (model::universe_map.find(uid) == model::universe_map.end()) {
        throw std::runtime_error {fmt::format(
          "Universe {} in volume calculation does not exist in geometry.",
          uid)};
      }
    }
  }

  // Shared data that is collected from all threads
  int n = domain_ids_.size();
  vector<vector<uint64_t>> master_indices(
    n); // List of material indices for each domain
  vector<vector<uint64_t>> master_hits(
    n); // Number of hits for each material in each domain
  int iterations = 0;

  // Divide work over MPI processes: the first `remainder` ranks get one
  // extra sample so the full [0, n_samples_) range is covered exactly once
  uint64_t min_samples = n_samples_ / mpi::n_procs;
  uint64_t remainder = n_samples_ % mpi::n_procs;
  uint64_t i_start, i_end;
  if (mpi::rank < remainder) {
    i_start = (min_samples + 1) * mpi::rank;
    i_end = i_start + min_samples + 1;
  } else {
    i_start =
      (min_samples + 1) * remainder + (mpi::rank - remainder) * min_samples;
    i_end = i_start + min_samples;
  }

  while (true) {

#pragma omp parallel
    {
      // Variables that are private to each thread
      vector<vector<uint64_t>> indices(n);
      vector<vector<uint64_t>> hits(n);
      Particle p;

// Sample locations and count hits
#pragma omp for
      for (size_t i = i_start; i < i_end; i++) {
        // Seed from the global sample index so results are reproducible
        // regardless of thread/rank decomposition
        uint64_t id = iterations * n_samples_ + i;
        uint64_t seed = init_seed(id, STREAM_VOLUME);

        p.n_coord() = 1;
        // Uniformly sample a position inside the bounding box
        Position xi {prn(&seed), prn(&seed), prn(&seed)};
        p.r() = lower_left_ + xi * (upper_right_ - lower_left_);
        p.u() = {1. / std::sqrt(3.), 1. / std::sqrt(3.), 1. / std::sqrt(3.)};

        // If this location is not in the geometry at all, move on to next block
        if (!exhaustive_find_cell(p))
          continue;

        if (domain_type_ == TallyDomain::MATERIAL) {
          // Void points cannot be attributed to any material domain
          if (p.material() != MATERIAL_VOID) {
            for (int i_domain = 0; i_domain < n; i_domain++) {
              if (model::materials[p.material()]->id_ ==
                  domain_ids_[i_domain]) {
                this->check_hit(
                  p.material(), indices[i_domain], hits[i_domain]);
                break;
              }
            }
          }
        } else if (domain_type_ == TallyDomain::CELL) {
          // A point can score in several cells at once (one per nesting
          // level of the geometry), so walk every coordinate level
          for (int level = 0; level < p.n_coord(); ++level) {
            for (int i_domain = 0; i_domain < n; i_domain++) {
              if (model::cells[p.coord(level).cell()]->id_ ==
                  domain_ids_[i_domain]) {
                this->check_hit(
                  p.material(), indices[i_domain], hits[i_domain]);
                break;
              }
            }
          }
        } else if (domain_type_ == TallyDomain::UNIVERSE) {
          for (int level = 0; level < p.n_coord(); ++level) {
            for (int i_domain = 0; i_domain < n; ++i_domain) {
              if (model::universes[p.coord(level).universe()]->id_ ==
                  domain_ids_[i_domain]) {
                check_hit(p.material(), indices[i_domain], hits[i_domain]);
                break;
              }
            }
          }
        }
      }

      // At this point, each thread has its own pair of index/hits lists and we
      // now need to reduce them. OpenMP is not nearly smart enough to do this
      // on its own, so we have to manually reduce them
      for (int i_domain = 0; i_domain < n; ++i_domain) {
        reduce_indices_hits(indices[i_domain], hits[i_domain],
          master_indices[i_domain], master_hits[i_domain]);
      }
    } // omp parallel

    // Reduce hits onto master process

    // Determine volume of bounding box
    Position d {upper_right_ - lower_left_};
    double volume_sample = d.x * d.y * d.z;

    // bump iteration counter and get total number
    // of samples at this point
    iterations++;
    uint64_t total_samples = iterations * n_samples_;

    // warn user if total sample size is greater than what the uint64_t type
    // can represent
    if (total_samples == std::numeric_limits<uint64_t>::max()) {
      warning("The number of samples has exceeded the type used to track hits. "
              "Volume results may be inaccurate.");
    }

    // reset the trigger value; it is recomputed as a max over all domains
    double trigger_val = -INFTY;

    // Set size for members of the Result struct
    vector<Result> results(n);

    for (int i_domain = 0; i_domain < n; ++i_domain) {
      // Get reference to result for this domain
      auto& result {results[i_domain]};

      // Create 2D array to store atoms/uncertainty for each nuclide. Later this
      // is compressed into vectors storing only those nuclides that are
      // non-zero
      auto n_nuc =
        settings::run_CE ? data::nuclides.size() : data::mg.nuclides_.size();
      auto atoms =
        tensor::zeros<double>({static_cast<size_t>(n_nuc), size_t {2}});

#ifdef OPENMC_MPI
      // Merge the (index, hits) lists from every worker rank into the
      // master's lists for this domain
      if (mpi::master) {
        for (int j = 1; j < mpi::n_procs; j++) {
          // NOTE(review): q is an int but is received as MPI_UINT64_T (and
          // sent the same way below) — an 8-byte transfer into 4 bytes of
          // storage. Verify against the sender; q should likely be uint64_t
          // or the datatype MPI_INT.
          int q;
          // retrieve results
          MPI_Recv(
            &q, 1, MPI_UINT64_T, j, 2 * j, mpi::intracomm, MPI_STATUS_IGNORE);
          vector<uint64_t> buffer(2 * q);
          MPI_Recv(buffer.data(), 2 * q, MPI_UINT64_T, j, 2 * j + 1,
            mpi::intracomm, MPI_STATUS_IGNORE);
          // buffer holds interleaved (material index, hit count) pairs
          for (int k = 0; k < q; ++k) {
            bool already_added = false;
            for (int m = 0; m < master_indices[i_domain].size(); ++m) {
              if (buffer[2 * k] == master_indices[i_domain][m]) {
                master_hits[i_domain][m] += buffer[2 * k + 1];
                already_added = true;
                break;
              }
            }
            if (!already_added) {
              master_indices[i_domain].push_back(buffer[2 * k]);
              master_hits[i_domain].push_back(buffer[2 * k + 1]);
            }
          }
        }
      } else {
        // Worker ranks: pack this domain's pairs and ship them to rank 0
        int q = master_indices[i_domain].size();
        vector<uint64_t> buffer(2 * q);
        for (int k = 0; k < q; ++k) {
          buffer[2 * k] = master_indices[i_domain][k];
          buffer[2 * k + 1] = master_hits[i_domain][k];
        }

        MPI_Send(&q, 1, MPI_UINT64_T, 0, 2 * mpi::rank, mpi::intracomm);
        MPI_Send(buffer.data(), 2 * q, MPI_UINT64_T, 0, 2 * mpi::rank + 1,
          mpi::intracomm);
      }
#endif

      if (mpi::master) {
        size_t total_hits = 0;
        for (int j = 0; j < master_indices[i_domain].size(); ++j) {
          total_hits += master_hits[i_domain][j];
          // f is the fraction of samples landing in this material;
          // var_f is its binomial variance
          double f =
            static_cast<double>(master_hits[i_domain][j]) / total_samples;
          double var_f = f * (1.0 - f) / total_samples;

          // Void contributes to the domain volume (total_hits above) but
          // has no nuclides to accumulate
          int i_material = master_indices[i_domain][j];
          if (i_material == MATERIAL_VOID)
            continue;

          const auto& mat = model::materials[i_material];
          for (int k = 0; k < mat->nuclide_.size(); ++k) {
            // Accumulate nuclide density
            int i_nuclide = mat->nuclide_[k];
            atoms(i_nuclide, 0) += mat->atom_density_[k] * f;
            atoms(i_nuclide, 1) += std::pow(mat->atom_density_[k], 2) * var_f;
          }
        }

        // Determine volume (mean and standard deviation)
        result.volume[0] =
          static_cast<double>(total_hits) / total_samples * volume_sample;
        result.volume[1] =
          std::sqrt(result.volume[0] * (volume_sample - result.volume[0]) /
                    total_samples);
        result.iterations = iterations;

        // update threshold value if needed
        if (trigger_type_ != TriggerMetric::not_active) {
          double val = 0.0;
          switch (trigger_type_) {
          case TriggerMetric::standard_deviation:
            val = result.volume[1];
            break;
          case TriggerMetric::relative_error:
            // zero-volume domains can never converge on relative error
            val = result.volume[0] == 0.0 ? INFTY
                                          : result.volume[1] / result.volume[0];
            break;
          case TriggerMetric::variance:
            val = result.volume[1] * result.volume[1];
            break;
          default:
            break;
          }
          // update max if entry is valid
          if (val > 0.0) {
            trigger_val = std::max(trigger_val, val);
          }
        }

        for (int j = 0; j < n_nuc; ++j) {
          // Determine total number of atoms. At this point, we have values in
          // atoms/b-cm. To get to atoms we multiply by 10^24 V.
          double mean = 1.0e24 * volume_sample * atoms(j, 0);
          double stdev = 1.0e24 * volume_sample * std::sqrt(atoms(j, 1));

          // Convert full arrays to vectors, keeping only non-zero nuclides
          if (mean > 0.0) {
            result.nuclides.push_back(j);
            result.atoms.push_back(mean);
            result.uncertainty.push_back(stdev);
          }
        }
      }
    } // end domain loop

    // if no trigger is applied, we're done after a single batch
    if (trigger_type_ == TriggerMetric::not_active) {
      return results;
    }

#ifdef OPENMC_MPI
    // update maximum error value on all processes so every rank makes the
    // same continue/stop decision
    MPI_Bcast(&trigger_val, 1, MPI_DOUBLE, 0, mpi::intracomm);
#endif

    // return results of the calculation once converged
    if (trigger_val < threshold_) {
      return results;
    }

#ifdef OPENMC_MPI
    // if iterating in an MPI run, need to zero indices and hits so they aren't
    // counted twice (workers re-send their full lists each batch)
    if (!mpi::master) {
      for (auto& v : master_indices) {
        std::fill(v.begin(), v.end(), 0);
      }
      for (auto& v : master_hits) {
        std::fill(v.begin(), v.end(), 0);
      }
    }
#endif

  } // end while
}
384

385
void VolumeCalculation::to_hdf5(
205✔
386
  const std::string& filename, const vector<Result>& results) const
387
{
388
  // Create HDF5 file
389
  hid_t file_id = file_open(filename, 'w');
205✔
390

391
  // Write header info
392
  write_attribute(file_id, "filetype", "volume");
205✔
393
  write_attribute(file_id, "version", VERSION_VOLUME);
205✔
394
  write_attribute(file_id, "openmc_version", VERSION);
205✔
395
#ifdef GIT_SHA1
396
  write_attribute(file_id, "git_sha1", GIT_SHA1);
397
#endif
398

399
  // Write current date and time
400
  write_attribute(file_id, "date_and_time", time_stamp());
205✔
401

402
  // Write basic metadata
403
  write_attribute(file_id, "samples", n_samples_);
205✔
404
  write_attribute(file_id, "lower_left", lower_left_);
205✔
405
  write_attribute(file_id, "upper_right", upper_right_);
205✔
406
  // Write trigger info
407
  if (trigger_type_ != TriggerMetric::not_active) {
205✔
408
    write_attribute(file_id, "iterations", results[0].iterations);
60✔
409
    write_attribute(file_id, "threshold", threshold_);
60✔
410
    std::string trigger_str;
60✔
411
    switch (trigger_type_) {
60!
412
    case TriggerMetric::variance:
20✔
413
      trigger_str = "variance";
20✔
414
      break;
20✔
415
    case TriggerMetric::standard_deviation:
20✔
416
      trigger_str = "std_dev";
20✔
417
      break;
20✔
418
    case TriggerMetric::relative_error:
20✔
419
      trigger_str = "rel_err";
20✔
420
      break;
20✔
421
    default:
×
422
      break;
×
423
    }
424
    write_attribute(file_id, "trigger_type", trigger_str);
60✔
425
  } else {
60✔
426
    write_attribute(file_id, "iterations", 1);
145✔
427
  }
428

429
  if (domain_type_ == TallyDomain::CELL) {
205✔
430
    write_attribute(file_id, "domain_type", "cell");
102✔
431
  } else if (domain_type_ == TallyDomain::MATERIAL) {
103✔
432
    write_attribute(file_id, "domain_type", "material");
73✔
433
  } else if (domain_type_ == TallyDomain::UNIVERSE) {
30!
434
    write_attribute(file_id, "domain_type", "universe");
30✔
435
  }
436

437
  for (int i = 0; i < domain_ids_.size(); ++i) {
601✔
438
    hid_t group_id =
439
      create_group(file_id, fmt::format("domain_{}", domain_ids_[i]));
792✔
440

441
    // Write volume for domain
442
    const auto& result {results[i]};
396✔
443
    write_dataset(group_id, "volume", result.volume);
396✔
444

445
    // Create array of nuclide names from the vector
446
    auto n_nuc = result.nuclides.size();
396✔
447

448
    vector<std::string> nucnames;
396✔
449
    for (int i_nuc : result.nuclides) {
1,570✔
450
      nucnames.push_back(settings::run_CE ? data::nuclides[i_nuc]->name_
1,564✔
451
                                          : data::mg.nuclides_[i_nuc].name);
390✔
452
    }
453

454
    // Create array of total # of atoms with uncertainty for each nuclide
455
    tensor::Tensor<double> atom_data({static_cast<size_t>(n_nuc), size_t {2}});
396✔
456
    for (size_t k = 0; k < static_cast<size_t>(n_nuc); ++k) {
1,570✔
457
      atom_data(k, 0) = result.atoms[k];
1,174✔
458
      atom_data(k, 1) = result.uncertainty[k];
1,174✔
459
    }
460

461
    // Write results
462
    write_dataset(group_id, "nuclides", nucnames);
396✔
463
    write_dataset(group_id, "atoms", atom_data);
396✔
464

465
    close_group(group_id);
396✔
466
  }
396✔
467

468
  file_close(file_id);
205✔
469
}
205✔
470

471
void VolumeCalculation::check_hit(
10,418,210✔
472
  int i_material, vector<uint64_t>& indices, vector<uint64_t>& hits) const
473
{
474

475
  // Check if this material was previously hit and if so, increment count
476
  bool already_hit = false;
10,418,210✔
477
  for (int j = 0; j < indices.size(); j++) {
22,249,438✔
478
    if (indices[j] == i_material) {
11,831,228✔
479
      hits[j]++;
10,341,622✔
480
      already_hit = true;
10,341,622✔
481
    }
482
  }
483

484
  // If the material was not previously hit, append an entry to the material
485
  // indices and hits lists
486
  if (!already_hit) {
10,418,210✔
487
    indices.push_back(i_material);
76,588✔
488
    hits.push_back(1);
76,588✔
489
  }
490
}
10,418,210✔
491

492
void free_memory_volume()
7,536✔
493
{
494
  openmc::model::volume_calcs.clear();
7,536✔
495
}
7,536✔
496

497
} // namespace openmc
498

499
//==============================================================================
500
// OPENMC_CALCULATE_VOLUMES runs each of the stochastic volume calculations
501
// that the user has specified and writes results to HDF5 files
502
//==============================================================================
503

504
// C-API entry point: run every stochastic volume calculation the user has
// specified, print per-domain volumes on the master rank, and write each
// calculation's results to volume_<i>.h5 in the output directory. Returns 0
// on success or OPENMC_E_UNASSIGNED (with errmsg set) if a calculation
// throws, e.g. for an unknown domain ID.
int openmc_calculate_volumes()
{
  using namespace openmc;

  // Only the master rank prints the section header
  if (mpi::master) {
    header("STOCHASTIC VOLUME CALCULATION", 3);
  }
  Timer time_volume;
  time_volume.start();

  for (int i = 0; i < model::volume_calcs.size(); ++i) {
    write_message(4, "Running volume calculation {}", i + 1);

    // Run volume calculation (collective: all ranks participate)
    const auto& vol_calc {model::volume_calcs[i]};
    std::vector<VolumeCalculation::Result> results;
    try {
      results = vol_calc.execute();
    } catch (const std::exception& e) {
      // Translate the exception into a C-API error code for callers
      set_errmsg(e.what());
      return OPENMC_E_UNASSIGNED;
    }

    // Reporting and file output happen on the master rank only, since only
    // its Result entries are populated
    if (mpi::master) {
      // Label prefix for the per-domain messages below
      std::string domain_type;
      if (vol_calc.domain_type_ == VolumeCalculation::TallyDomain::CELL) {
        domain_type = "  Cell ";
      } else if (vol_calc.domain_type_ ==
                 VolumeCalculation::TallyDomain::MATERIAL) {
        domain_type = "  Material ";
      } else {
        domain_type = "  Universe ";
      }

      // Display domain volumes
      for (int j = 0; j < vol_calc.domain_ids_.size(); j++) {
        // Look up a human-readable name where one exists (cells and
        // materials can be named; universes cannot)
        std::string region_name {""};
        if (vol_calc.domain_type_ == VolumeCalculation::TallyDomain::CELL) {
          int cell_idx = model::cell_map[vol_calc.domain_ids_[j]];
          region_name = model::cells[cell_idx]->name();
        } else if (vol_calc.domain_type_ ==
                   VolumeCalculation::TallyDomain::MATERIAL) {
          int mat_idx = model::material_map[vol_calc.domain_ids_[j]];
          region_name = model::materials[mat_idx]->name();
        }
        if (region_name.size())
          region_name.insert(0, " "); // prepend space for formatting

        write_message(4, "{}{}{}: {} +/- {} cm^3", domain_type,
          vol_calc.domain_ids_[j], region_name, results[j].volume[0],
          results[j].volume[1]);
      }

      // Write volumes to HDF5 file (one file per calculation, 1-based)
      std::string filename =
        fmt::format("{}volume_{}.h5", settings::path_output, i + 1);
      vol_calc.to_hdf5(filename, results);
    }
  }

  // Show elapsed time
  time_volume.stop();
  write_message(6, "Elapsed time: {} s", time_volume.elapsed());

  return 0;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc