• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 25632480981

10 May 2026 03:25PM UTC coverage: 81.003% (-0.4%) from 81.388%
25632480981

Pull #3757

github

web-flow
Merge f7b7414eb into d56cda254
Pull Request #3757: Testing point detectors

17777 of 25846 branches covered (68.78%)

Branch coverage included in aggregate %.

51 of 372 new or added lines in 25 files covered. (13.71%)

3 existing lines in 2 files now uncovered.

58790 of 68678 relevant lines covered (85.6%)

46992975.87 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.76
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "openmc/tensor.h"
8
#include <fmt/core.h>
9

10
#include "openmc/bank.h"
11
#include "openmc/bank_io.h"
12
#include "openmc/capi.h"
13
#include "openmc/constants.h"
14
#include "openmc/eigenvalue.h"
15
#include "openmc/error.h"
16
#include "openmc/file_utils.h"
17
#include "openmc/hdf5_interface.h"
18
#include "openmc/mcpl_interface.h"
19
#include "openmc/mesh.h"
20
#include "openmc/message_passing.h"
21
#include "openmc/mgxs_interface.h"
22
#include "openmc/nuclide.h"
23
#include "openmc/output.h"
24
#include "openmc/particle_type.h"
25
#include "openmc/settings.h"
26
#include "openmc/simulation.h"
27
#include "openmc/tallies/derivative.h"
28
#include "openmc/tallies/filter.h"
29
#include "openmc/tallies/filter_mesh.h"
30
#include "openmc/tallies/tally.h"
31
#include "openmc/timer.h"
32
#include "openmc/vector.h"
33

34
namespace openmc {
35

36
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
8,419✔
37
{
38
  simulation::time_statepoint.start();
8,419✔
39

40
  // If a nullptr is passed in, we assume that the user
41
  // wants a default name for this, of the form like output/statepoint.20.h5
42
  std::string filename_;
8,419✔
43
  if (filename) {
8,419✔
44
    filename_ = filename;
867✔
45
  } else {
46
    // Determine width for zero padding
47
    int w = std::to_string(settings::n_max_batches).size();
7,552✔
48

49
    // Set filename for state point
50
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
15,104✔
51
      simulation::current_batch, w);
7,552✔
52
  }
53

54
  // If a file name was specified, ensure it has .h5 file extension
55
  const auto extension = get_file_extension(filename_);
8,419✔
56
  if (extension != "h5") {
8,419!
57
    warning("openmc_statepoint_write was passed a file extension differing "
×
58
            "from .h5, but an hdf5 file will be written.");
59
  }
60

61
  // Determine whether or not to write the source bank
62
  bool write_source_ = write_source ? *write_source : true;
8,419!
63

64
  // Write message
65
  write_message("Creating state point " + filename_ + "...", 5);
16,838✔
66

67
  hid_t file_id;
8,419✔
68
  if (mpi::master) {
8,419✔
69
    // Create statepoint file
70
    file_id = file_open(filename_, 'w');
7,423✔
71

72
    // Write file type
73
    write_attribute(file_id, "filetype", "statepoint");
7,423✔
74

75
    // Write revision number for state point file
76
    write_attribute(file_id, "version", VERSION_STATEPOINT);
7,423✔
77

78
    // Write OpenMC version
79
    write_attribute(file_id, "openmc_version", VERSION);
7,423✔
80
#ifdef GIT_SHA1
81
    write_attribute(file_id, "git_sha1", GIT_SHA1);
82
#endif
83

84
    // Write current date and time
85
    write_attribute(file_id, "date_and_time", time_stamp());
14,846✔
86

87
    // Write path to input
88
    write_attribute(file_id, "path", settings::path_input);
7,423✔
89

90
    // Write out random number seed
91
    write_dataset(file_id, "seed", openmc_get_seed());
7,423✔
92

93
    // Write out random number stride
94
    write_dataset(file_id, "stride", openmc_get_stride());
7,423✔
95

96
    // Write run information
97
    write_dataset(file_id, "energy_mode",
8,448✔
98
      settings::run_CE ? "continuous-energy" : "multi-group");
99
    switch (settings::run_mode) {
7,423!
100
    case RunMode::FIXED_SOURCE:
2,850✔
101
      write_dataset(file_id, "run_mode", "fixed source");
2,850✔
102
      break;
103
    case RunMode::EIGENVALUE:
4,573✔
104
      write_dataset(file_id, "run_mode", "eigenvalue");
4,573✔
105
      break;
106
    default:
107
      break;
108
    }
109
    write_attribute(file_id, "photon_transport", settings::photon_transport);
7,423✔
110
    write_dataset(file_id, "n_particles", settings::n_particles);
7,423✔
111
    write_dataset(file_id, "n_batches", settings::n_batches);
7,423✔
112

113
    // Write out current batch number
114
    write_dataset(file_id, "current_batch", simulation::current_batch);
7,423✔
115

116
    // Indicate whether source bank is stored in statepoint
117
    write_attribute(file_id, "source_present", write_source_);
7,423✔
118

119
    // Write out information for eigenvalue run
120
    if (settings::run_mode == RunMode::EIGENVALUE)
7,423✔
121
      write_eigenvalue_hdf5(file_id);
4,573✔
122

123
    hid_t tallies_group = create_group(file_id, "tallies");
7,423✔
124

125
    // Write meshes
126
    meshes_to_hdf5(tallies_group);
7,423✔
127

128
    // Write information for derivatives
129
    if (!model::tally_derivs.empty()) {
7,423✔
130
      hid_t derivs_group = create_group(tallies_group, "derivatives");
11✔
131
      for (const auto& deriv : model::tally_derivs) {
66✔
132
        hid_t deriv_group =
55✔
133
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
55✔
134
        write_dataset(deriv_group, "material", deriv.diff_material);
55✔
135
        if (deriv.variable == DerivativeVariable::DENSITY) {
55✔
136
          write_dataset(deriv_group, "independent variable", "density");
22✔
137
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
33✔
138
          write_dataset(deriv_group, "independent variable", "nuclide_density");
22✔
139
          write_dataset(
44✔
140
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
22✔
141
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
11!
142
          write_dataset(deriv_group, "independent variable", "temperature");
11✔
143
        } else {
144
          fatal_error("Independent variable for derivative " +
×
145
                      std::to_string(deriv.id) +
×
146
                      " not defined in state_point.cpp");
147
        }
148
        close_group(deriv_group);
55✔
149
      }
150
      close_group(derivs_group);
11✔
151
    }
152

153
    // Write information for filters
154
    hid_t filters_group = create_group(tallies_group, "filters");
7,423✔
155
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
7,423✔
156
    if (!model::tally_filters.empty()) {
7,423✔
157
      // Write filter IDs
158
      vector<int32_t> filter_ids;
4,761✔
159
      filter_ids.reserve(model::tally_filters.size());
4,761✔
160
      for (const auto& filt : model::tally_filters)
16,691✔
161
        filter_ids.push_back(filt->id());
11,930✔
162
      write_attribute(filters_group, "ids", filter_ids);
4,761✔
163

164
      // Write info for each filter
165
      for (const auto& filt : model::tally_filters) {
16,691✔
166
        hid_t filter_group =
11,930✔
167
          create_group(filters_group, "filter " + std::to_string(filt->id()));
11,930✔
168
        filt->to_statepoint(filter_group);
11,930✔
169
        close_group(filter_group);
11,930✔
170
      }
171
    }
4,761✔
172
    close_group(filters_group);
7,423✔
173

174
    // Write information for tallies
175
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
7,423✔
176
    if (!model::tallies.empty()) {
7,423✔
177
      // Write tally IDs
178
      vector<int32_t> tally_ids;
5,333✔
179
      tally_ids.reserve(model::tallies.size());
5,333✔
180
      for (const auto& tally : model::tallies)
27,984✔
181
        tally_ids.push_back(tally->id_);
22,651✔
182
      write_attribute(tallies_group, "ids", tally_ids);
5,333✔
183

184
      // Write all tally information except results
185
      for (const auto& tally : model::tallies) {
27,984✔
186
        hid_t tally_group =
22,651✔
187
          create_group(tallies_group, "tally " + std::to_string(tally->id_));
22,651✔
188

189
        write_dataset(tally_group, "name", tally->name_);
22,651✔
190

191
        if (tally->writable_) {
22,651✔
192
          write_attribute(tally_group, "internal", 0);
21,319✔
193
        } else {
194
          write_attribute(tally_group, "internal", 1);
1,332✔
195
          close_group(tally_group);
1,332✔
196
          continue;
1,332✔
197
        }
198

199
        if (tally->multiply_density()) {
21,319✔
200
          write_attribute(tally_group, "multiply_density", 1);
21,209✔
201
        } else {
202
          write_attribute(tally_group, "multiply_density", 0);
110✔
203
        }
204

205
        if (tally->higher_moments()) {
21,319✔
206
          write_attribute(tally_group, "higher_moments", 1);
11✔
207
        } else {
208
          write_attribute(tally_group, "higher_moments", 0);
21,308✔
209
        }
210

211
        if (tally->estimator_ == TallyEstimator::ANALOG) {
21,319✔
212
          write_dataset(tally_group, "estimator", "analog");
7,961✔
213
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
13,358✔
214
          write_dataset(tally_group, "estimator", "tracklength");
12,472✔
215
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
886!
216
          write_dataset(tally_group, "estimator", "collision");
886✔
NEW
217
        } else if (tally->estimator_ == TallyEstimator::NEXT_EVENT) {
×
NEW
218
          write_dataset(tally_group, "estimator", "next-event");
×
219
        }
220

221
        write_dataset(tally_group, "n_realizations", tally->n_realizations_);
21,319✔
222

223
        // Write the ID of each filter attached to this tally
224
        write_dataset(tally_group, "n_filters", tally->filters().size());
21,319✔
225
        if (!tally->filters().empty()) {
21,319✔
226
          vector<int32_t> filter_ids;
20,098✔
227
          filter_ids.reserve(tally->filters().size());
20,098✔
228
          for (auto i_filt : tally->filters())
60,951✔
229
            filter_ids.push_back(model::tally_filters[i_filt]->id());
40,853✔
230
          write_dataset(tally_group, "filters", filter_ids);
20,098✔
231
        }
20,098✔
232

233
        // Write the nuclides this tally scores
234
        vector<std::string> nuclides;
21,319✔
235
        for (auto i_nuclide : tally->nuclides_) {
50,624✔
236
          if (i_nuclide == -1) {
29,305✔
237
            nuclides.push_back("total");
37,270✔
238
          } else {
239
            if (settings::run_CE) {
10,670✔
240
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
10,560✔
241
            } else {
242
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
110✔
243
            }
244
          }
245
        }
246
        write_dataset(tally_group, "nuclides", nuclides);
21,319✔
247

248
        if (tally->deriv_ != C_NONE)
21,319✔
249
          write_dataset(
220✔
250
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);
220✔
251

252
        // Write the tally score bins
253
        vector<std::string> scores;
21,319✔
254
        for (auto sc : tally->scores_)
52,585✔
255
          scores.push_back(reaction_name(sc));
62,532✔
256
        write_dataset(tally_group, "n_score_bins", scores.size());
21,319✔
257
        write_dataset(tally_group, "score_bins", scores);
21,319✔
258

259
        close_group(tally_group);
21,319✔
260
      }
21,319✔
261
    }
5,333✔
262

263
    if (settings::reduce_tallies) {
7,423✔
264
      // Write global tallies
265
      write_dataset(file_id, "global_tallies", simulation::global_tallies);
7,412✔
266

267
      // Write tallies
268
      if (model::active_tallies.size() > 0) {
7,412✔
269
        // Indicate that tallies are on
270
        write_attribute(file_id, "tallies_present", 1);
5,102✔
271

272
        // Write all tally results
273
        for (const auto& tally : model::tallies) {
27,522✔
274
          if (!tally->writable_)
22,420✔
275
            continue;
1,112✔
276

277
          // Write results for each bin
278
          std::string name = "tally " + std::to_string(tally->id_);
21,308✔
279
          hid_t tally_group = open_group(tallies_group, name.c_str());
21,308✔
280
          auto& results = tally->results_;
21,308!
281
          write_tally_results(tally_group, results.shape(0), results.shape(1),
63,924!
282
            results.shape(2), results.data());
42,616!
283
          close_group(tally_group);
21,308✔
284
        }
21,308✔
285
      } else {
286
        // Indicate tallies are off
287
        write_attribute(file_id, "tallies_present", 0);
2,310✔
288
      }
289
    }
290

291
    close_group(tallies_group);
7,423✔
292
  }
293

294
  // Check for the no-tally-reduction method
295
  if (!settings::reduce_tallies) {
8,419✔
296
    // If using the no-tally-reduction method, we need to collect tally
297
    // results before writing them to the state point file.
298
    write_tally_results_nr(file_id);
15✔
299

300
  } else if (mpi::master) {
8,404✔
301
    // Write number of global realizations
302
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
7,412✔
303
  }
304

305
  if (mpi::master) {
8,419✔
306
    // Write out the runtime metrics.
307
    using namespace simulation;
7,423✔
308
    hid_t runtime_group = create_group(file_id, "runtime");
7,423✔
309
    write_dataset(
7,423✔
310
      runtime_group, "total initialization", time_initialize.elapsed());
311
    write_dataset(
7,423✔
312
      runtime_group, "reading cross sections", time_read_xs.elapsed());
313
    write_dataset(runtime_group, "simulation",
7,423✔
314
      time_inactive.elapsed() + time_active.elapsed());
7,423✔
315
    write_dataset(runtime_group, "transport", time_transport.elapsed());
7,423✔
316
    if (settings::run_mode == RunMode::EIGENVALUE) {
7,423✔
317
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
4,573✔
318
    }
319
    write_dataset(runtime_group, "active batches", time_active.elapsed());
7,423✔
320
    if (settings::run_mode == RunMode::EIGENVALUE) {
7,423✔
321
      write_dataset(
4,573✔
322
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
323
      write_dataset(
4,573✔
324
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
325
      write_dataset(
4,573✔
326
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
327
    }
328
    write_dataset(
7,423✔
329
      runtime_group, "accumulating tallies", time_tallies.elapsed());
330
    write_dataset(runtime_group, "total", time_total.elapsed());
7,423✔
331
    write_dataset(
7,423✔
332
      runtime_group, "writing statepoints", time_statepoint.elapsed());
333
    close_group(runtime_group);
7,423✔
334

335
    file_close(file_id);
7,423✔
336
  }
337

338
#ifdef PHDF5
339
  bool parallel = true;
3,705✔
340
#else
341
  bool parallel = false;
4,714✔
342
#endif
343

344
  // Write the source bank if desired
345
  if (write_source_) {
8,419✔
346
    if (mpi::master || parallel)
4,070!
347
      file_id = file_open(filename_, 'a', true);
4,070✔
348
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
4,070✔
349
    if (mpi::master || parallel)
4,070!
350
      file_close(file_id);
4,070✔
351
  }
352

353
#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
354
  // write unstructured mesh tally files
355
  write_unstructured_mesh_results();
2,569✔
356
#endif
357

358
  simulation::time_statepoint.stop();
8,419✔
359

360
  return 0;
8,419✔
361
}
8,419✔
362

363
void restart_set_keff()
63✔
364
{
365
  if (simulation::restart_batch > settings::n_inactive) {
63!
366
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
300✔
367
      simulation::k_sum[0] += simulation::k_generation[i];
237✔
368
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
237✔
369
    }
370
    int n = settings::gen_per_batch * simulation::n_realizations;
63✔
371
    simulation::keff = simulation::k_sum[0] / n;
63✔
372
  } else {
373
    simulation::keff = simulation::k_generation.back();
×
374
  }
375
}
63✔
376

377
void load_state_point()
63✔
378
{
379
  write_message(
63✔
380
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
63✔
381
  openmc_statepoint_load(settings::path_statepoint.c_str());
63✔
382
}
63✔
383

384
// Abort with a fatal error unless the statepoint file's format version
// matches the version this build of OpenMC writes.
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> file_version;
  read_attribute(file_id, "version", file_version);

  // Early return on the expected (matching) case
  if (file_version == VERSION_STATEPOINT)
    return;

  fatal_error("State point version does not match current version in OpenMC.");
}
395

396
extern "C" int openmc_statepoint_load(const char* filename)
63✔
397
{
398
  // Open file for reading
399
  hid_t file_id = file_open(filename, 'r', true);
63✔
400

401
  // Read filetype
402
  std::string word;
63✔
403
  read_attribute(file_id, "filetype", word);
63✔
404
  if (word != "statepoint") {
63!
405
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
406
  }
407

408
  statepoint_version_check(file_id);
63✔
409

410
  // Read and overwrite random number seed
411
  int64_t seed;
63✔
412
  read_dataset(file_id, "seed", seed);
63✔
413
  openmc_set_seed(seed);
63✔
414

415
  // Read and overwrite random number stride
416
  uint64_t stride;
63✔
417
  read_dataset(file_id, "stride", stride);
63✔
418
  openmc_set_stride(stride);
63✔
419

420
  // It is not impossible for a state point to be generated from a CE run but
421
  // to be loaded in to an MG run (or vice versa), check to prevent that.
422
  read_dataset(file_id, "energy_mode", word);
63✔
423
  if (word == "multi-group" && settings::run_CE) {
63!
424
    fatal_error("State point file is from multigroup run but current run is "
×
425
                "continous energy.");
426
  } else if (word == "continuous-energy" && !settings::run_CE) {
63!
427
    fatal_error("State point file is from continuous-energy run but current "
×
428
                "run is multigroup!");
429
  }
430

431
  // Read and overwrite run information except number of batches
432
  read_dataset(file_id, "run_mode", word);
63✔
433
  if (word == "fixed source") {
63!
434
    settings::run_mode = RunMode::FIXED_SOURCE;
×
435
  } else if (word == "eigenvalue") {
63!
436
    settings::run_mode = RunMode::EIGENVALUE;
63✔
437
  }
438
  read_attribute(file_id, "photon_transport", settings::photon_transport);
63✔
439
  read_dataset(file_id, "n_particles", settings::n_particles);
63✔
440
  int temp;
63✔
441
  read_dataset(file_id, "n_batches", temp);
63✔
442

443
  // Take maximum of statepoint n_batches and input n_batches
444
  settings::n_batches = std::max(settings::n_batches, temp);
63✔
445

446
  // Read batch number to restart at
447
  read_dataset(file_id, "current_batch", simulation::restart_batch);
63✔
448

449
  if (settings::restart_run &&
63!
450
      simulation::restart_batch >= settings::n_max_batches) {
63✔
451
    warning(fmt::format(
22✔
452
      "The number of batches specified for simulation ({}) is smaller "
453
      "than or equal to the number of batches in the restart statepoint file "
454
      "({})",
455
      settings::n_max_batches, simulation::restart_batch));
456
  }
457

458
  // Logical flag for source present in statepoint file
459
  bool source_present;
63✔
460
  read_attribute(file_id, "source_present", source_present);
63✔
461

462
  // Read information specific to eigenvalue run
463
  if (settings::run_mode == RunMode::EIGENVALUE) {
63!
464
    read_dataset(file_id, "n_inactive", temp);
63✔
465
    read_eigenvalue_hdf5(file_id);
63✔
466

467
    // Take maximum of statepoint n_inactive and input n_inactive
468
    settings::n_inactive = std::max(settings::n_inactive, temp);
63!
469

470
    // Check to make sure source bank is present
471
    if (settings::path_sourcepoint == settings::path_statepoint &&
63!
472
        !source_present) {
63!
473
      fatal_error("Source bank must be contained in statepoint restart file");
×
474
    }
475
  }
476

477
  // Read number of realizations for global tallies
478
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
63✔
479

480
  // Set k_sum, keff, and current_batch based on whether restart file is part
481
  // of active cycle or inactive cycle
482
  if (settings::run_mode == RunMode::EIGENVALUE) {
63!
483
    restart_set_keff();
63✔
484
  }
485

486
  // Set current batch number
487
  simulation::current_batch = simulation::restart_batch;
63✔
488

489
  // Read tallies to master. If we are using Parallel HDF5, all processes
490
  // need to be included in the HDF5 calls.
491
#ifdef PHDF5
492
  if (true) {
28✔
493
#else
494
  if (mpi::master) {
35!
495
#endif
496
    // Read global tally data
497
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
63✔
498
      false, simulation::global_tallies.data());
63✔
499

500
    // Check if tally results are present
501
    bool present;
63✔
502
    read_attribute(file_id, "tallies_present", present);
63✔
503

504
    // Read in sum and sum squared
505
    if (present) {
63!
506
      hid_t tallies_group = open_group(file_id, "tallies");
63✔
507

508
      for (auto& tally : model::tallies) {
218✔
509
        // Read sum, sum_sq, and N for each bin
510
        std::string name = "tally " + std::to_string(tally->id_);
155✔
511
        hid_t tally_group = open_group(tallies_group, name.c_str());
155✔
512

513
        int internal = 0;
155✔
514
        if (attribute_exists(tally_group, "internal")) {
155!
515
          read_attribute(tally_group, "internal", internal);
155✔
516
        }
517
        if (internal) {
155!
518
          tally->writable_ = false;
×
519
        } else {
520
          auto& results = tally->results_;
155!
521
          read_tally_results(tally_group, results.shape(0), results.shape(1),
465!
522
            results.shape(2), results.data());
155!
523

524
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
155✔
525
          close_group(tally_group);
155✔
526
        }
527
      }
155✔
528
      close_group(tallies_group);
63✔
529
    }
530
  }
531

532
  // Read source if in eigenvalue mode
533
  if (settings::run_mode == RunMode::EIGENVALUE) {
63!
534

535
    // Check if source was written out separately
536
    if (!source_present) {
63!
537

538
      // Close statepoint file
539
      file_close(file_id);
×
540

541
      // Write message
542
      write_message(
×
543
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
544

545
      // Open source file
546
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
547
    }
548

549
    // Read source
550
    read_source_bank(file_id, simulation::source_bank, true);
63✔
551
  }
552

553
  // Close file
554
  file_close(file_id);
63✔
555

556
  return 0;
63✔
557
}
63✔
558

559
// Build the HDF5 compound datatype describing one SourceSite bank record.
//
// memory: if true, the compound type spans sizeof(SourceSite) so it matches
//         the in-memory struct layout; if false, a packed size is used for
//         the on-disk (file) representation.
// Returns an HDF5 datatype id; the caller is responsible for H5Tclose.
hid_t h5banktype(bool memory)
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  auto n = sizeof(SourceSite);
  if (!memory)
    // Packed file size: r + u (two Positions), E/time/wgt (three doubles),
    // delayed_group/surf_id/particle (three ints).
    // NOTE(review): member offsets below still use HOFFSET(SourceSite, ...),
    // so this assumes those fields are laid out first without extra padding
    // — confirm against the SourceSite definition.
    n = 2 * sizeof(struct Position) + 3 * sizeof(double) + 3 * sizeof(int);
  hid_t banktype = H5Tcreate(H5T_COMPOUND, n);
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The position sub-type has been copied into banktype; release our handle
  H5Tclose(postype);
  return banktype;
}
592

593
// Write the source bank to a standalone source file, dispatching on format.
//
// filename:   base path without extension; ".h5" or ".mcpl" is appended.
// source_bank: this rank's source sites.
// bank_index: per-rank offsets into the global bank.
// use_mcpl:   write an MCPL file instead of HDF5.
void write_source_point(std::string filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index, bool use_mcpl)
{
  std::string ext = use_mcpl ? "mcpl" : "h5";

  // Total particle count across ranks, for the log message only
  int total_surf_particles = source_bank.size();
#ifdef OPENMC_MPI
  int num_particles = source_bank.size();
  MPI_Allreduce(
    &num_particles, &total_surf_particles, 1, MPI_INT, MPI_SUM, mpi::intracomm);
#endif

  write_message("Creating source file {}.{} with {} particles ...", filename,
    ext, total_surf_particles, 5);

  // Dispatch to appropriate function based on file type
  if (use_mcpl) {
    filename.append(".mcpl");
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
  } else {
    filename.append(".h5");
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
  }
}
617

618
// Write the source bank to a standalone HDF5 source file.
//
// filename:   full output path (must be non-null; ".h5" expected).
// source_bank: this rank's source sites.
// bank_index: per-rank offsets into the global bank.
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is by all processes since slave
  // processes need to send source bank data to the master.
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // NOTE(review): file_id is uninitialized on non-master, non-parallel ranks;
  // write_source_bank appears to not use it there — confirm.
  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
    write_attribute(file_id, "version", VERSION_STATEPOINT);
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}
654

655
// Write the source bank dataset into an open HDF5 group using separate
// in-memory and on-disk compound datatypes (see h5banktype).
//
// group_id:   open HDF5 file/group to write "source_bank" into.
// source_bank: this rank's source sites.
// bank_index: per-rank offsets into the global bank.
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t membanktype = h5banktype(true);   // in-memory layout
  hid_t filebanktype = h5banktype(false); // packed on-disk layout

#ifdef OPENMC_MPI
  // MPI build passes the SourceSite MPI datatype for cross-rank transfer
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype, mpi::source_site);
#else
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype);
#endif

  H5Tclose(membanktype);
  H5Tclose(filebanktype);
}
672

673
// Determine member names of a compound HDF5 datatype
674
std::string dtype_member_names(hid_t dtype_id)
248✔
675
{
676
  int nmembers = H5Tget_nmembers(dtype_id);
248✔
677
  std::string names;
248✔
678
  for (int i = 0; i < nmembers; i++) {
2,187✔
679
    char* name = H5Tget_member_name(dtype_id, i);
1,939✔
680
    names = names.append(name);
1,939✔
681
    H5free_memory(name);
1,939✔
682
    if (i < nmembers - 1)
1,939✔
683
      names += ", ";
1,939✔
684
  }
685
  return names;
248✔
686
}
×
687

688
void read_source_bank(
124✔
689
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
690
{
691
  bool legacy_particle_codes = true;
124✔
692
  if (attribute_exists(group_id, "version")) {
124✔
693
    array<int, 2> version;
115✔
694
    read_attribute(group_id, "version", version);
115✔
695
    if (version[0] > VERSION_STATEPOINT[0] ||
115!
696
        (version[0] == VERSION_STATEPOINT[0] && version[1] >= 2)) {
115!
697
      legacy_particle_codes = false;
698
    }
699
  }
700

701
  hid_t banktype = h5banktype(true);
124✔
702

703
  // Open the dataset
704
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
124✔
705

706
  // Make sure number of members matches
707
  hid_t dtype = H5Dget_type(dset);
124✔
708
  auto file_member_names = dtype_member_names(dtype);
124✔
709
  auto bank_member_names = dtype_member_names(banktype);
124✔
710
  if (file_member_names != bank_member_names) {
124✔
711
    fatal_error(fmt::format(
9✔
712
      "Source site attributes in file do not match what is "
713
      "expected for this version of OpenMC. File attributes = ({}). Expected "
714
      "attributes = ({})",
715
      file_member_names, bank_member_names));
716
  }
717

718
  hid_t dspace = H5Dget_space(dset);
115✔
719
  hsize_t n_sites;
115✔
720
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
115✔
721

722
  // Make sure vector is big enough in case where we're reading entire source on
723
  // each process
724
  if (!distribute)
115✔
725
    sites.resize(n_sites);
52✔
726

727
  hid_t memspace;
115✔
728
  if (distribute) {
115✔
729
    if (simulation::work_index[mpi::n_procs] > n_sites) {
63!
730
      fatal_error("Number of source sites in source file is less "
×
731
                  "than number of source particles per generation.");
732
    }
733

734
    // Create another data space but for each proc individually
735
    hsize_t n_sites_local = simulation::work_per_rank;
63✔
736
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
63✔
737

738
    // Select hyperslab for each process
739
    hsize_t offset = simulation::work_index[mpi::rank];
63✔
740
    H5Sselect_hyperslab(
63✔
741
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
742
  } else {
743
    memspace = H5S_ALL;
744
  }
745

746
#ifdef PHDF5
747
  // Read data in parallel
748
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
52✔
749
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
52✔
750
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
52✔
751
  H5Pclose(plist);
52✔
752
#else
753
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
63✔
754
#endif
755

756
  // Close all ids
757
  H5Sclose(dspace);
115✔
758
  if (distribute)
115✔
759
    H5Sclose(memspace);
63✔
760
  H5Dclose(dset);
115✔
761
  H5Tclose(banktype);
115✔
762

763
  if (legacy_particle_codes) {
115!
764
    for (auto& site : sites) {
×
765
      site.particle = legacy_particle_index_to_type(site.particle.pdg_number());
×
766
    }
767
  }
768
}
115✔
769

770
void write_unstructured_mesh_results()
2,569✔
771
{
772

773
  for (auto& tally : model::tallies) {
11,612✔
774

775
    vector<std::string> tally_scores;
9,043✔
776
    for (auto filter_idx : tally->filters()) {
26,456✔
777
      auto& filter = model::tally_filters[filter_idx];
17,413!
778
      if (filter->type() != FilterType::MESH)
17,413!
779
        continue;
17,398✔
780

781
      // check if the filter uses an unstructured mesh
782
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
1,967!
783
      auto mesh_idx = mesh_filter->mesh();
1,967!
784
      auto umesh =
1,967✔
785
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
1,967!
786

787
      if (!umesh)
1,967✔
788
        continue;
1,932✔
789

790
      if (!umesh->output_)
35!
791
        continue;
×
792

793
      if (umesh->library() == "moab") {
70!
794
        if (mpi::master)
20✔
795
          warning(fmt::format(
20!
796
            "Output for a MOAB mesh (mesh {}) was "
797
            "requested but will not be written. Please use the Python "
798
            "API to generated the desired VTK tetrahedral mesh.",
799
            umesh->id_));
10!
800
        continue;
20✔
801
      }
802

803
      // if this tally has more than one filter, print
804
      // warning and skip writing the mesh
805
      if (tally->filters().size() > 1) {
15!
806
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
807
                            "{}. More than one filter is present on the tally.",
808
          tally->id_));
×
809
        break;
×
810
      }
811

812
      int n_realizations = tally->n_realizations_;
15✔
813

814
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
815
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
816
          // combine the score and nuclide into a name for the value
817
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
818
            tally->nuclide_name(nuc_idx));
45!
819
          // add this score to the mesh
820
          // (this is in a separate loop because all variables need to be added
821
          //  to libMesh's equation system before any are initialized, which
822
          //  happens in set_score_data)
823
          umesh->add_score(score_str);
15!
824
        }
15✔
825
      }
826

827
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
828
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
829
          // combine the score and nuclide into a name for the value
830
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
831
            tally->nuclide_name(nuc_idx));
45!
832

833
          // index for this nuclide and score
834
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
15!
835

836
          // construct result vectors
837
          vector<double> mean_vec(umesh->n_bins()),
15!
838
            std_dev_vec(umesh->n_bins());
15!
839
          for (int j = 0; j < tally->results_.shape(0); j++) {
293,598!
840
            // get the volume for this bin
841
            double volume = umesh->volume(j);
146,784!
842
            // compute the mean
843
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
146,784!
844
                          n_realizations;
146,784✔
845
            mean_vec.at(j) = mean / volume;
146,784!
846

847
            // compute the standard deviation
848
            double sum_sq =
146,784!
849
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
146,784✔
850
            double std_dev {0.0};
146,784✔
851
            if (n_realizations > 1) {
146,784!
852
              std_dev = sum_sq / n_realizations - mean * mean;
146,784✔
853
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
146,784✔
854
            }
855
            std_dev_vec[j] = std_dev / volume;
146,784✔
856
          }
857
#ifdef OPENMC_MPI
858
          MPI_Bcast(
10!
859
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10!
860
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10!
861
            mpi::intracomm);
862
#endif
863
          // set the data for this score
864
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
15!
865
        }
15✔
866
      }
867

868
      // Generate a file name based on the tally id
869
      // and the current batch number
870
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
15!
871
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
15!
872
        simulation::current_batch, batch_width);
15!
873

874
      // Write the unstructured mesh and data to file
875
      umesh->write(filename);
15!
876

877
      // remove score data added for this mesh write
878
      umesh->remove_scores();
15!
879
    }
15✔
880
  }
9,043✔
881
}
2,569✔
882

883
//! Reduce tally results across MPI ranks and write them to a statepoint file.
//!
//! Used when tallies are not reduced during the simulation ("no reduction"
//! mode): each rank holds partial sums, so the SUM and SUM_SQ columns are
//! MPI-reduced onto the master here, written to the HDF5 file, and — on the
//! final batch or when triggers are satisfied — copied back into the tally
//! results so end-of-run output sees the reduced values.
//!
//! \param file_id  Open HDF5 file handle for the statepoint being written
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  tensor::Tensor<double> gt_reduced({N_GLOBAL_TALLIES, 3});
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer values to value on master
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    // Mark the file as containing tally data (written once, as an attribute)
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Copy the SUM and SUM_SQ columns from the tally results into a
    // contiguous array for MPI reduction
    const int r_start = static_cast<int>(TallyResult::SUM);
    const int r_end = static_cast<int>(TallyResult::SUM_SQ) + 1;
    const size_t r_count = r_end - r_start;
    const size_t ni = t->results_.shape(0);
    const size_t nj = t->results_.shape(1);
    tensor::Tensor<double> values({ni, nj, r_count});
    for (size_t i = 0; i < ni; i++)
      for (size_t j = 0; j < nj; j++)
        for (size_t r = 0; r < r_count; r++)
          values(i, j, r) = t->results_(i, j, r_start + r);

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the reduced results back
      // into the tally results array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        for (size_t i = 0; i < ni; i++)
          for (size_t j = 0; j < nj; j++)
            for (size_t r = 0; r < r_count; r++)
              t->results_(i, j, r_start + r) = values(i, j, r);
      }

      // Put reduced values into a full-sized copy for writing to HDF5
      tensor::Tensor<double> results_copy = tensor::zeros_like(t->results_);
      for (size_t i = 0; i < ni; i++)
        for (size_t j = 0; j < nj; j++)
          for (size_t r = 0; r < r_count; r++)
            results_copy(i, j, r_start + r) = values(i, j, r);

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(
        tally_group, shape[0], shape[1], shape[2], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    // BUGFIX: the marker above is written as an HDF5 *attribute*, but this
    // fallback previously checked object_exists() (datasets/groups only) and
    // wrote a *dataset* "tallies_present" = 0 — so files with tallies ended
    // up carrying both attribute=1 and dataset=0. Check and write the
    // attribute consistently instead.
    if (!attribute_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_attribute(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}
996

997
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc