• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 23910205296

02 Apr 2026 04:14PM UTC coverage: 81.224% (-0.3%) from 81.567%
23910205296

Pull #3766

github

web-flow
Merge 264dcb1ef into 8223099ed
Pull Request #3766: Approximate multigroup velocity

17579 of 25426 branches covered (69.14%)

Branch coverage included in aggregate %.

24 of 25 new or added lines in 4 files covered. (96.0%)

710 existing lines in 29 files now uncovered.

58015 of 67642 relevant lines covered (85.77%)

31291841.44 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.21
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "openmc/tensor.h"
8
#include <fmt/core.h>
9

10
#include "openmc/bank.h"
11
#include "openmc/bank_io.h"
12
#include "openmc/capi.h"
13
#include "openmc/constants.h"
14
#include "openmc/eigenvalue.h"
15
#include "openmc/error.h"
16
#include "openmc/file_utils.h"
17
#include "openmc/hdf5_interface.h"
18
#include "openmc/mcpl_interface.h"
19
#include "openmc/mesh.h"
20
#include "openmc/message_passing.h"
21
#include "openmc/mgxs_interface.h"
22
#include "openmc/nuclide.h"
23
#include "openmc/output.h"
24
#include "openmc/particle_type.h"
25
#include "openmc/settings.h"
26
#include "openmc/simulation.h"
27
#include "openmc/tallies/derivative.h"
28
#include "openmc/tallies/filter.h"
29
#include "openmc/tallies/filter_mesh.h"
30
#include "openmc/tallies/tally.h"
31
#include "openmc/timer.h"
32
#include "openmc/vector.h"
33

34
namespace openmc {
35

36
//! Write an HDF5 statepoint file capturing the current simulation state.
//!
//! \param filename Path of the statepoint to create; if nullptr, a default
//!   of the form "<path_output>statepoint.<batch>.h5" is generated, with the
//!   batch number zero-padded to the width of n_max_batches.
//! \param write_source If non-null, dereferenced to decide whether the
//!   source bank is appended to the file; nullptr means "write it".
//! \return 0 unconditionally; problems are reported via warning()/fatal_error().
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user
  // wants a default name for this, of the form like output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  // Metadata, tallies, and runtime info are written by the master rank only;
  // other ranks rejoin below for the (possibly collective) source-bank write.
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      // Other run modes (e.g. plotting/volume) write no "run_mode" dataset
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        // Internal (non-writable) tallies get only the "internal" marker;
        // everything else about them is skipped.
        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->higher_moments()) {
          write_attribute(tally_group, "higher_moments", 1);
        } else {
          write_attribute(tally_group, "higher_moments", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores; index -1 denotes "total"
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;

          // Write results for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape(0), results.shape(1),
            results.shape(2), results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    // NOTE(review): on a non-master rank without PHDF5, file_id is passed to
    // write_source_bank uninitialized; presumably such ranks only send their
    // bank data to the master and never dereference the id — confirm in
    // write_bank_dataset.
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
360

361
void restart_set_keff()
34✔
362
{
363
  if (simulation::restart_batch > settings::n_inactive) {
34!
364
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
162✔
365
      simulation::k_sum[0] += simulation::k_generation[i];
128✔
366
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
128✔
367
    }
368
    int n = settings::gen_per_batch * simulation::n_realizations;
34✔
369
    simulation::keff = simulation::k_sum[0] / n;
34✔
370
  } else {
371
    simulation::keff = simulation::k_generation.back();
×
372
  }
373
}
34✔
374

375
void load_state_point()
34✔
376
{
377
  write_message(
34✔
378
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
34✔
379
  openmc_statepoint_load(settings::path_statepoint.c_str());
34✔
380
}
34✔
381

382
//! Verify that a statepoint file's format revision matches this build.
//!
//! Reads the (major, minor) "version" attribute from the open HDF5 file and
//! aborts with fatal_error on any mismatch with VERSION_STATEPOINT.
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> file_version;
  read_attribute(file_id, "version", file_version);
  if (file_version == VERSION_STATEPOINT)
    return;
  fatal_error("State point version does not match current version in OpenMC.");
}
393

394
extern "C" int openmc_statepoint_load(const char* filename)
34✔
395
{
396
  // Open file for reading
397
  hid_t file_id = file_open(filename, 'r', true);
34✔
398

399
  // Read filetype
400
  std::string word;
34✔
401
  read_attribute(file_id, "filetype", word);
34✔
402
  if (word != "statepoint") {
34!
403
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
404
  }
405

406
  statepoint_version_check(file_id);
34✔
407

408
  // Read and overwrite random number seed
409
  int64_t seed;
34✔
410
  read_dataset(file_id, "seed", seed);
34✔
411
  openmc_set_seed(seed);
34✔
412

413
  // Read and overwrite random number stride
414
  uint64_t stride;
34✔
415
  read_dataset(file_id, "stride", stride);
34✔
416
  openmc_set_stride(stride);
34✔
417

418
  // It is not impossible for a state point to be generated from a CE run but
419
  // to be loaded in to an MG run (or vice versa), check to prevent that.
420
  read_dataset(file_id, "energy_mode", word);
34✔
421
  if (word == "multi-group" && settings::run_CE) {
34!
422
    fatal_error("State point file is from multigroup run but current run is "
×
423
                "continous energy.");
424
  } else if (word == "continuous-energy" && !settings::run_CE) {
34!
425
    fatal_error("State point file is from continuous-energy run but current "
×
426
                "run is multigroup!");
427
  }
428

429
  // Read and overwrite run information except number of batches
430
  read_dataset(file_id, "run_mode", word);
34✔
431
  if (word == "fixed source") {
34!
432
    settings::run_mode = RunMode::FIXED_SOURCE;
×
433
  } else if (word == "eigenvalue") {
34!
434
    settings::run_mode = RunMode::EIGENVALUE;
34✔
435
  }
436
  read_attribute(file_id, "photon_transport", settings::photon_transport);
34✔
437
  read_dataset(file_id, "n_particles", settings::n_particles);
34✔
438
  int temp;
34✔
439
  read_dataset(file_id, "n_batches", temp);
34✔
440

441
  // Take maximum of statepoint n_batches and input n_batches
442
  settings::n_batches = std::max(settings::n_batches, temp);
34✔
443

444
  // Read batch number to restart at
445
  read_dataset(file_id, "current_batch", simulation::restart_batch);
34✔
446

447
  if (settings::restart_run &&
34!
448
      simulation::restart_batch >= settings::n_max_batches) {
34✔
449
    warning(fmt::format(
12✔
450
      "The number of batches specified for simulation ({}) is smaller "
451
      "than or equal to the number of batches in the restart statepoint file "
452
      "({})",
453
      settings::n_max_batches, simulation::restart_batch));
454
  }
455

456
  // Logical flag for source present in statepoint file
457
  bool source_present;
34✔
458
  read_attribute(file_id, "source_present", source_present);
34✔
459

460
  // Read information specific to eigenvalue run
461
  if (settings::run_mode == RunMode::EIGENVALUE) {
34!
462
    read_dataset(file_id, "n_inactive", temp);
34✔
463
    read_eigenvalue_hdf5(file_id);
34✔
464

465
    // Take maximum of statepoint n_inactive and input n_inactive
466
    settings::n_inactive = std::max(settings::n_inactive, temp);
34!
467

468
    // Check to make sure source bank is present
469
    if (settings::path_sourcepoint == settings::path_statepoint &&
34!
470
        !source_present) {
34!
471
      fatal_error("Source bank must be contained in statepoint restart file");
×
472
    }
473
  }
474

475
  // Read number of realizations for global tallies
476
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
34✔
477

478
  // Set k_sum, keff, and current_batch based on whether restart file is part
479
  // of active cycle or inactive cycle
480
  if (settings::run_mode == RunMode::EIGENVALUE) {
34!
481
    restart_set_keff();
34✔
482
  }
483

484
  // Set current batch number
485
  simulation::current_batch = simulation::restart_batch;
34✔
486

487
  // Read tallies to master. If we are using Parallel HDF5, all processes
488
  // need to be included in the HDF5 calls.
489
#ifdef PHDF5
490
  if (true) {
14✔
491
#else
492
  if (mpi::master) {
20!
493
#endif
494
    // Read global tally data
495
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
34✔
496
      false, simulation::global_tallies.data());
34✔
497

498
    // Check if tally results are present
499
    bool present;
34✔
500
    read_attribute(file_id, "tallies_present", present);
34✔
501

502
    // Read in sum and sum squared
503
    if (present) {
34!
504
      hid_t tallies_group = open_group(file_id, "tallies");
34✔
505

506
      for (auto& tally : model::tallies) {
118✔
507
        // Read sum, sum_sq, and N for each bin
508
        std::string name = "tally " + std::to_string(tally->id_);
84✔
509
        hid_t tally_group = open_group(tallies_group, name.c_str());
84✔
510

511
        int internal = 0;
84✔
512
        if (attribute_exists(tally_group, "internal")) {
84!
513
          read_attribute(tally_group, "internal", internal);
84✔
514
        }
515
        if (internal) {
84!
516
          tally->writable_ = false;
×
517
        } else {
518
          auto& results = tally->results_;
84!
519
          read_tally_results(tally_group, results.shape(0), results.shape(1),
252!
520
            results.shape(2), results.data());
84!
521

522
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
84✔
523
          close_group(tally_group);
84✔
524
        }
525
      }
84✔
526
      close_group(tallies_group);
34✔
527
    }
528
  }
529

530
  // Read source if in eigenvalue mode
531
  if (settings::run_mode == RunMode::EIGENVALUE) {
34!
532

533
    // Check if source was written out separately
534
    if (!source_present) {
34!
535

536
      // Close statepoint file
537
      file_close(file_id);
×
538

539
      // Write message
540
      write_message(
×
541
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
542

543
      // Open source file
544
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
545
    }
546

547
    // Read source
548
    read_source_bank(file_id, simulation::source_bank, true);
34✔
549
  }
550

551
  // Close file
552
  file_close(file_id);
34✔
553

554
  return 0;
34✔
555
}
34✔
556

557
//! Build the HDF5 compound datatype describing a SourceSite bank record.
//!
//! \param memory If true, the type mirrors the in-memory SourceSite layout
//!   (size = sizeof(SourceSite), including any padding); if false, a packed
//!   on-disk layout is used so files carry no padding bytes.
//! \return A newly created compound type id; the caller owns it and must
//!   release it with H5Tclose.
hid_t h5banktype(bool memory)
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  auto n = sizeof(SourceSite);
  if (!memory)
    // Packed size: two Position members (r, u), three doubles (E, time, wgt),
    // three ints (delayed_group, surf_id, particle).
    // NOTE(review): member offsets below are in-memory HOFFSETs even for the
    // packed file type — this assumes SourceSite carries no padding before its
    // last inserted member so every offset fits inside n; confirm against the
    // SourceSite definition.
    n = 2 * sizeof(struct Position) + 3 * sizeof(double) + 3 * sizeof(int);
  hid_t banktype = H5Tcreate(H5T_COMPOUND, n);
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The position subtype has been copied into banktype; release our handle
  H5Tclose(postype);
  return banktype;
}
590

591
void write_source_point(std::string filename, span<SourceSite> source_bank,
686✔
592
  const vector<int64_t>& bank_index, bool use_mcpl)
593
{
594
  std::string ext = use_mcpl ? "mcpl" : "h5";
1,352✔
595

596
  int total_surf_particles = source_bank.size();
686✔
597
#ifdef OPENMC_MPI
598
  int num_particles = source_bank.size();
250✔
599
  MPI_Allreduce(
250✔
600
    &num_particles, &total_surf_particles, 1, MPI_INT, MPI_SUM, mpi::intracomm);
601
#endif
602

603
  write_message("Creating source file {}.{} with {} particles ...", filename,
686✔
604
    ext, total_surf_particles, 5);
686✔
605

606
  // Dispatch to appropriate function based on file type
607
  if (use_mcpl) {
686✔
608
    filename.append(".mcpl");
20✔
609
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
20✔
610
  } else {
611
    filename.append(".h5");
666✔
612
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
666✔
613
  }
614
}
686✔
615

616
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
666✔
617
  const vector<int64_t>& bank_index)
618
{
619
  // When using parallel HDF5, the file is written to collectively by all
620
  // processes. With MPI-only, the file is opened and written by the master
621
  // (note that the call to write_source_bank is by all processes since slave
622
  // processes need to send source bank data to the master.
623
#ifdef PHDF5
624
  bool parallel = true;
242✔
625
#else
626
  bool parallel = false;
424✔
627
#endif
628

629
  if (!filename)
666!
UNCOV
630
    fatal_error("write_source_point filename needs a nonempty name.");
×
631

632
  std::string filename_(filename);
666✔
633
  const auto extension = get_file_extension(filename_);
666✔
634
  if (extension != "h5") {
666!
UNCOV
635
    warning("write_source_point was passed a file extension differing "
×
636
            "from .h5, but an hdf5 file will be written.");
637
  }
638

639
  hid_t file_id;
666✔
640
  if (mpi::master || parallel) {
666!
641
    file_id = file_open(filename_.c_str(), 'w', true);
666✔
642
    write_attribute(file_id, "filetype", "source");
666✔
643
    write_attribute(file_id, "version", VERSION_STATEPOINT);
666✔
644
  }
645

646
  // Get pointer to source bank and write to file
647
  write_source_bank(file_id, source_bank, bank_index);
666✔
648

649
  if (mpi::master || parallel)
666!
650
    file_close(file_id);
666✔
651
}
666✔
652

653
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
2,799✔
654
  const vector<int64_t>& bank_index)
655
{
656
  hid_t membanktype = h5banktype(true);
2,799✔
657
  hid_t filebanktype = h5banktype(false);
2,799✔
658

659
#ifdef OPENMC_MPI
660
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
1,154✔
661
    membanktype, filebanktype, mpi::source_site);
662
#else
663
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
1,645✔
664
    membanktype, filebanktype);
665
#endif
666

667
  H5Tclose(membanktype);
2,799✔
668
  H5Tclose(filebanktype);
2,799✔
669
}
2,799✔
670

671
// Determine member names of a compound HDF5 datatype
672
std::string dtype_member_names(hid_t dtype_id)
132✔
673
{
674
  int nmembers = H5Tget_nmembers(dtype_id);
132✔
675
  std::string names;
132✔
676
  for (int i = 0; i < nmembers; i++) {
1,168✔
677
    char* name = H5Tget_member_name(dtype_id, i);
1,036✔
678
    names = names.append(name);
1,036✔
679
    H5free_memory(name);
1,036✔
680
    if (i < nmembers - 1)
1,036✔
681
      names += ", ";
1,036✔
682
  }
683
  return names;
132✔
UNCOV
684
}
×
685

686
void read_source_bank(
66✔
687
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
688
{
689
  bool legacy_particle_codes = true;
66✔
690
  if (attribute_exists(group_id, "version")) {
66✔
691
    array<int, 2> version;
62✔
692
    read_attribute(group_id, "version", version);
62✔
693
    if (version[0] > VERSION_STATEPOINT[0] ||
62!
694
        (version[0] == VERSION_STATEPOINT[0] && version[1] >= 2)) {
62!
695
      legacy_particle_codes = false;
696
    }
697
  }
698

699
  hid_t banktype = h5banktype(true);
66✔
700

701
  // Open the dataset
702
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
66✔
703

704
  // Make sure number of members matches
705
  hid_t dtype = H5Dget_type(dset);
66✔
706
  auto file_member_names = dtype_member_names(dtype);
66✔
707
  auto bank_member_names = dtype_member_names(banktype);
66✔
708
  if (file_member_names != bank_member_names) {
66✔
709
    fatal_error(fmt::format(
4✔
710
      "Source site attributes in file do not match what is "
711
      "expected for this version of OpenMC. File attributes = ({}). Expected "
712
      "attributes = ({})",
713
      file_member_names, bank_member_names));
714
  }
715

716
  hid_t dspace = H5Dget_space(dset);
62✔
717
  hsize_t n_sites;
62✔
718
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
62✔
719

720
  // Make sure vector is big enough in case where we're reading entire source on
721
  // each process
722
  if (!distribute)
62✔
723
    sites.resize(n_sites);
28✔
724

725
  hid_t memspace;
62✔
726
  if (distribute) {
62✔
727
    if (simulation::work_index[mpi::n_procs] > n_sites) {
34!
UNCOV
728
      fatal_error("Number of source sites in source file is less "
×
729
                  "than number of source particles per generation.");
730
    }
731

732
    // Create another data space but for each proc individually
733
    hsize_t n_sites_local = simulation::work_per_rank;
34✔
734
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
34✔
735

736
    // Select hyperslab for each process
737
    hsize_t offset = simulation::work_index[mpi::rank];
34✔
738
    H5Sselect_hyperslab(
34✔
739
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
740
  } else {
741
    memspace = H5S_ALL;
742
  }
743

744
#ifdef PHDF5
745
  // Read data in parallel
746
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
26✔
747
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
26✔
748
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
26✔
749
  H5Pclose(plist);
26✔
750
#else
751
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
36✔
752
#endif
753

754
  // Close all ids
755
  H5Sclose(dspace);
62✔
756
  if (distribute)
62✔
757
    H5Sclose(memspace);
34✔
758
  H5Dclose(dset);
62✔
759
  H5Tclose(banktype);
62✔
760

761
  if (legacy_particle_codes) {
62!
UNCOV
762
    for (auto& site : sites) {
×
UNCOV
763
      site.particle = legacy_particle_index_to_type(site.particle.pdg_number());
×
764
    }
765
  }
766
}
62✔
767

768
void write_unstructured_mesh_results()
2,461✔
769
{
770

771
  for (auto& tally : model::tallies) {
11,350✔
772

773
    vector<std::string> tally_scores;
8,889✔
774
    for (auto filter_idx : tally->filters()) {
26,113✔
775
      auto& filter = model::tally_filters[filter_idx];
17,224!
776
      if (filter->type() != FilterType::MESH)
17,224!
777
        continue;
17,209✔
778

779
      // check if the filter uses an unstructured mesh
780
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
1,957!
781
      auto mesh_idx = mesh_filter->mesh();
1,957!
782
      auto umesh =
1,957✔
783
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
1,957!
784

785
      if (!umesh)
1,957✔
786
        continue;
1,922✔
787

788
      if (!umesh->output_)
35!
UNCOV
789
        continue;
×
790

791
      if (umesh->library() == "moab") {
70!
792
        if (mpi::master)
20✔
793
          warning(fmt::format(
20!
794
            "Output for a MOAB mesh (mesh {}) was "
795
            "requested but will not be written. Please use the Python "
796
            "API to generated the desired VTK tetrahedral mesh.",
797
            umesh->id_));
10!
798
        continue;
20✔
799
      }
800

801
      // if this tally has more than one filter, print
802
      // warning and skip writing the mesh
803
      if (tally->filters().size() > 1) {
15!
UNCOV
804
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
805
                            "{}. More than one filter is present on the tally.",
UNCOV
806
          tally->id_));
×
UNCOV
807
        break;
×
808
      }
809

810
      int n_realizations = tally->n_realizations_;
15✔
811

812
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
813
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
814
          // combine the score and nuclide into a name for the value
815
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
816
            tally->nuclide_name(nuc_idx));
45!
817
          // add this score to the mesh
818
          // (this is in a separate loop because all variables need to be added
819
          //  to libMesh's equation system before any are initialized, which
820
          //  happens in set_score_data)
821
          umesh->add_score(score_str);
15!
822
        }
15✔
823
      }
824

825
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
826
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
827
          // combine the score and nuclide into a name for the value
828
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
829
            tally->nuclide_name(nuc_idx));
45!
830

831
          // index for this nuclide and score
832
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
15!
833

834
          // construct result vectors
835
          vector<double> mean_vec(umesh->n_bins()),
15!
836
            std_dev_vec(umesh->n_bins());
15!
837
          for (int j = 0; j < tally->results_.shape(0); j++) {
293,598!
838
            // get the volume for this bin
839
            double volume = umesh->volume(j);
146,784!
840
            // compute the mean
841
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
146,784!
842
                          n_realizations;
146,784✔
843
            mean_vec.at(j) = mean / volume;
146,784!
844

845
            // compute the standard deviation
846
            double sum_sq =
146,784!
847
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
146,784✔
848
            double std_dev {0.0};
146,784✔
849
            if (n_realizations > 1) {
146,784!
850
              std_dev = sum_sq / n_realizations - mean * mean;
146,784✔
851
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
146,784✔
852
            }
853
            std_dev_vec[j] = std_dev / volume;
146,784✔
854
          }
855
#ifdef OPENMC_MPI
856
          MPI_Bcast(
10!
857
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10!
858
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10!
859
            mpi::intracomm);
860
#endif
861
          // set the data for this score
862
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
15!
863
        }
15✔
864
      }
865

866
      // Generate a file name based on the tally id
867
      // and the current batch number
868
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
15!
869
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
15!
870
        simulation::current_batch, batch_width);
15!
871

872
      // Write the unstructured mesh and data to file
873
      umesh->write(filename);
15!
874

875
      // remove score data added for this mesh write
876
      umesh->remove_scores();
15!
877
    }
15✔
878
  }
8,889✔
879
}
2,461✔
880

881
//! Write tally results to a statepoint file when tallies have NOT been
//! reduced across MPI ranks ("no-reduce" mode): each rank holds partial
//! SUM/SUM_SQ accumulations, which are reduced to the master here and
//! written by the master only.
//!
//! \param file_id  Open HDF5 file identifier for the statepoint file.
//!
//! NOTE: every rank must execute the same sequence of MPI_Reduce calls
//! (master with MPI_IN_PLACE, others with a send buffer), so the control
//! flow of the tally loop must stay identical on all ranks.
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies across all ranks onto the master
  tensor::Tensor<double> gt_reduced({N_GLOBAL_TALLIES, 3});
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer values to value on master; only done at the final batch (or
  // when triggers are satisfied) so intermediate statepoints don't clobber
  // the per-rank accumulators
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    // Mark the file as containing tally data (written once, on first
    // active+writable tally)
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Copy the SUM and SUM_SQ columns from the tally results into a
    // contiguous array for MPI reduction
    const int r_start = static_cast<int>(TallyResult::SUM);
    const int r_end = static_cast<int>(TallyResult::SUM_SQ) + 1;
    const size_t r_count = r_end - r_start;
    const size_t ni = t->results_.shape(0);
    const size_t nj = t->results_.shape(1);
    tensor::Tensor<double> values({ni, nj, r_count});
    for (size_t i = 0; i < ni; i++)
      for (size_t j = 0; j < nj; j++)
        for (size_t r = 0; r < r_count; r++)
          values(i, j, r) = t->results_(i, j, r_start + r);

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the reduced results back
      // into the tally results array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        for (size_t i = 0; i < ni; i++)
          for (size_t j = 0; j < nj; j++)
            for (size_t r = 0; r < r_count; r++)
              t->results_(i, j, r_start + r) = values(i, j, r);
      }

      // Put reduced values into a full-sized copy for writing to HDF5
      tensor::Tensor<double> results_copy = tensor::zeros_like(t->results_);
      for (size_t i = 0; i < ni; i++)
        for (size_t j = 0; j < nj; j++)
          for (size_t r = 0; r < r_count; r++)
            results_copy(i, j, r_start + r) = values(i, j, r);

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(
        tally_group, shape[0], shape[1], shape[2], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    // NOTE(review): the presence marker above is written as an HDF5
    // *attribute* ("tallies_present"), but this check uses object_exists,
    // which looks in the object (dataset/group) namespace -- so the dataset
    // below is written even when the attribute was set. Looks intentional
    // (attribute and dataset may serve different readers) but worth
    // confirming against the statepoint file format consumers.
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}
994

995
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc