• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 23285088002

19 Mar 2026 07:51AM UTC coverage: 80.71% (-0.7%) from 81.447%
23285088002

Pull #3886

github

web-flow
Merge 9cdb2d588 into 3ce6cbfdd
Pull Request #3886: Implement python tally types

16322 of 23494 branches covered (69.47%)

Branch coverage included in aggregate %.

215 of 264 new or added lines in 11 files covered. (81.44%)

713 existing lines in 49 files now uncovered.

56449 of 66670 relevant lines covered (84.67%)

14035992.85 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

74.35
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "openmc/tensor.h"
8
#include <fmt/core.h>
9

10
#include "openmc/bank.h"
11
#include "openmc/bank_io.h"
12
#include "openmc/capi.h"
13
#include "openmc/constants.h"
14
#include "openmc/eigenvalue.h"
15
#include "openmc/error.h"
16
#include "openmc/file_utils.h"
17
#include "openmc/hdf5_interface.h"
18
#include "openmc/mcpl_interface.h"
19
#include "openmc/mesh.h"
20
#include "openmc/message_passing.h"
21
#include "openmc/mgxs_interface.h"
22
#include "openmc/nuclide.h"
23
#include "openmc/output.h"
24
#include "openmc/particle_type.h"
25
#include "openmc/settings.h"
26
#include "openmc/simulation.h"
27
#include "openmc/tallies/derivative.h"
28
#include "openmc/tallies/filter.h"
29
#include "openmc/tallies/filter_mesh.h"
30
#include "openmc/tallies/tally.h"
31
#include "openmc/timer.h"
32
#include "openmc/vector.h"
33

34
namespace openmc {
35

36
// Write an HDF5 statepoint file capturing the current simulation state.
//
// \param filename     Path of the file to create. If nullptr, a default name
//                     of the form "<path_output>statepoint.<batch>.h5" is
//                     generated, zero-padded to the width of n_max_batches.
// \param write_source If non-null, dereferenced to decide whether the source
//                     bank is stored in the file; nullptr means "write it".
// \return 0
//
// Only the MPI master creates and writes the statepoint file proper; other
// ranks participate in the collective parts (no-tally-reduction gather and,
// with parallel HDF5, the source bank write).
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user
  // wants a default name for this, of the form like output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      // Other run modes (e.g. plotting/volume) write no "run_mode" dataset
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if ((tally->type_ == TallyType::SURFACE) ||
            (tally->type_ == TallyType::MESH_SURFACE)) {
          write_dataset(tally_group, "type", "surface");
        } else if (tally->type_ == TallyType::PULSE_HEIGHT) {
          write_dataset(tally_group, "type", "pulse-height");
        }

        // Internal (non-writable) tallies record only the "internal" flag;
        // all other metadata and results are skipped for them.
        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->higher_moments()) {
          write_attribute(tally_group, "higher_moments", 1);
        } else {
          write_attribute(tally_group, "higher_moments", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores; index -1 means "total"
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;

          // Write results for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape(0), results.shape(1),
            results.shape(2), results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

  // With parallel HDF5 every rank participates in the source bank write
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    // NOTE(review): on non-master ranks without PHDF5, file_id is passed here
    // without having been opened — presumably write_source_bank only sends
    // bank data to the master on those ranks and ignores the id; confirm.
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
367

368
void restart_set_keff()
10✔
369
{
370
  if (simulation::restart_batch > settings::n_inactive) {
10!
371
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
48✔
372
      simulation::k_sum[0] += simulation::k_generation[i];
38✔
373
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
38✔
374
    }
375
    int n = settings::gen_per_batch * simulation::n_realizations;
10✔
376
    simulation::keff = simulation::k_sum[0] / n;
10✔
377
  } else {
378
    simulation::keff = simulation::k_generation.back();
×
379
  }
380
}
10✔
381

382
void load_state_point()
10✔
383
{
384
  write_message(
10✔
385
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
10✔
386
  openmc_statepoint_load(settings::path_statepoint.c_str());
10✔
387
}
10✔
388

389
// Verify that an open statepoint file carries the (major, minor) statepoint
// format version this build of OpenMC expects; abort with a fatal error on
// any mismatch.
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> file_version;
  read_attribute(file_id, "version", file_version);
  if (!(file_version == VERSION_STATEPOINT)) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}
400

401
extern "C" int openmc_statepoint_load(const char* filename)
10✔
402
{
403
  // Open file for reading
404
  hid_t file_id = file_open(filename, 'r', true);
10✔
405

406
  // Read filetype
407
  std::string word;
10✔
408
  read_attribute(file_id, "filetype", word);
10✔
409
  if (word != "statepoint") {
10!
410
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
411
  }
412

413
  statepoint_version_check(file_id);
10✔
414

415
  // Read and overwrite random number seed
416
  int64_t seed;
10✔
417
  read_dataset(file_id, "seed", seed);
10✔
418
  openmc_set_seed(seed);
10✔
419

420
  // Read and overwrite random number stride
421
  uint64_t stride;
10✔
422
  read_dataset(file_id, "stride", stride);
10✔
423
  openmc_set_stride(stride);
10✔
424

425
  // It is not impossible for a state point to be generated from a CE run but
426
  // to be loaded in to an MG run (or vice versa), check to prevent that.
427
  read_dataset(file_id, "energy_mode", word);
10✔
428
  if (word == "multi-group" && settings::run_CE) {
10!
429
    fatal_error("State point file is from multigroup run but current run is "
×
430
                "continous energy.");
431
  } else if (word == "continuous-energy" && !settings::run_CE) {
10!
432
    fatal_error("State point file is from continuous-energy run but current "
×
433
                "run is multigroup!");
434
  }
435

436
  // Read and overwrite run information except number of batches
437
  read_dataset(file_id, "run_mode", word);
10✔
438
  if (word == "fixed source") {
10!
439
    settings::run_mode = RunMode::FIXED_SOURCE;
×
440
  } else if (word == "eigenvalue") {
10!
441
    settings::run_mode = RunMode::EIGENVALUE;
10✔
442
  }
443
  read_attribute(file_id, "photon_transport", settings::photon_transport);
10✔
444
  read_dataset(file_id, "n_particles", settings::n_particles);
10✔
445
  int temp;
10✔
446
  read_dataset(file_id, "n_batches", temp);
10✔
447

448
  // Take maximum of statepoint n_batches and input n_batches
449
  settings::n_batches = std::max(settings::n_batches, temp);
10✔
450

451
  // Read batch number to restart at
452
  read_dataset(file_id, "current_batch", simulation::restart_batch);
10✔
453

454
  if (settings::restart_run &&
10!
455
      simulation::restart_batch >= settings::n_max_batches) {
10✔
456
    warning(fmt::format(
4✔
457
      "The number of batches specified for simulation ({}) is smaller "
458
      "than or equal to the number of batches in the restart statepoint file "
459
      "({})",
460
      settings::n_max_batches, simulation::restart_batch));
461
  }
462

463
  // Logical flag for source present in statepoint file
464
  bool source_present;
10✔
465
  read_attribute(file_id, "source_present", source_present);
10✔
466

467
  // Read information specific to eigenvalue run
468
  if (settings::run_mode == RunMode::EIGENVALUE) {
10!
469
    read_dataset(file_id, "n_inactive", temp);
10✔
470
    read_eigenvalue_hdf5(file_id);
10✔
471

472
    // Take maximum of statepoint n_inactive and input n_inactive
473
    settings::n_inactive = std::max(settings::n_inactive, temp);
10!
474

475
    // Check to make sure source bank is present
476
    if (settings::path_sourcepoint == settings::path_statepoint &&
10!
477
        !source_present) {
10!
478
      fatal_error("Source bank must be contained in statepoint restart file");
×
479
    }
480
  }
481

482
  // Read number of realizations for global tallies
483
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
10✔
484

485
  // Set k_sum, keff, and current_batch based on whether restart file is part
486
  // of active cycle or inactive cycle
487
  if (settings::run_mode == RunMode::EIGENVALUE) {
10!
488
    restart_set_keff();
10✔
489
  }
490

491
  // Set current batch number
492
  simulation::current_batch = simulation::restart_batch;
10✔
493

494
  // Read tallies to master. If we are using Parallel HDF5, all processes
495
  // need to be included in the HDF5 calls.
496
#ifdef PHDF5
497
  if (true) {
498
#else
499
  if (mpi::master) {
10!
500
#endif
501
    // Read global tally data
502
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
10✔
503
      false, simulation::global_tallies.data());
10✔
504

505
    // Check if tally results are present
506
    bool present;
10✔
507
    read_attribute(file_id, "tallies_present", present);
10✔
508

509
    // Read in sum and sum squared
510
    if (present) {
10!
511
      hid_t tallies_group = open_group(file_id, "tallies");
10✔
512

513
      for (auto& tally : model::tallies) {
36✔
514
        // Read sum, sum_sq, and N for each bin
515
        std::string name = "tally " + std::to_string(tally->id_);
26✔
516
        hid_t tally_group = open_group(tallies_group, name.c_str());
26✔
517

518
        int internal = 0;
26✔
519
        if (attribute_exists(tally_group, "internal")) {
26!
520
          read_attribute(tally_group, "internal", internal);
26✔
521
        }
522
        if (internal) {
26!
523
          tally->writable_ = false;
×
524
        } else {
525
          auto& results = tally->results_;
26!
526
          read_tally_results(tally_group, results.shape(0), results.shape(1),
78!
527
            results.shape(2), results.data());
26!
528

529
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
26✔
530
          close_group(tally_group);
26✔
531
        }
532
      }
26✔
533
      close_group(tallies_group);
10✔
534
    }
535
  }
536

537
  // Read source if in eigenvalue mode
538
  if (settings::run_mode == RunMode::EIGENVALUE) {
10!
539

540
    // Check if source was written out separately
541
    if (!source_present) {
10!
542

543
      // Close statepoint file
544
      file_close(file_id);
×
545

546
      // Write message
547
      write_message(
×
548
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
549

550
      // Open source file
551
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
552
    }
553

554
    // Read source
555
    read_source_bank(file_id, simulation::source_bank, true);
10✔
556
  }
557

558
  // Close file
559
  file_close(file_id);
10✔
560

561
  return 0;
10✔
562
}
10✔
563

564
// Build the HDF5 compound datatype describing a SourceSite record.
//
// \param memory If true, size the type as sizeof(SourceSite) (the in-memory
//               layout). If false, size it as the plain sum of the member
//               sizes so that on-disk records are packed.
// \return Datatype id; the caller is responsible for H5Tclose-ing it.
//
// NOTE(review): member offsets come from HOFFSET on the in-memory struct in
// both cases, so the packed file size is only consistent if SourceSite has
// no internal padding between these members — confirm against SourceSite.
hid_t h5banktype(bool memory)
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  auto n = sizeof(SourceSite);
  if (!memory)
    n = 2 * sizeof(struct Position) + 3 * sizeof(double) + 3 * sizeof(int);
  hid_t banktype = H5Tcreate(H5T_COMPOUND, n);
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The compound member keeps its own reference; release our handle
  H5Tclose(postype);
  return banktype;
}
597

598
void write_source_point(std::string filename, span<SourceSite> source_bank,
218✔
599
  const vector<int64_t>& bank_index, bool use_mcpl)
600
{
601
  std::string ext = use_mcpl ? "mcpl" : "h5";
430✔
602
  write_message("Creating source file {}.{} with {} particles ...", filename,
218✔
603
    ext, source_bank.size(), 5);
218✔
604

605
  // Dispatch to appropriate function based on file type
606
  if (use_mcpl) {
218✔
607
    filename.append(".mcpl");
6✔
608
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
6✔
609
  } else {
610
    filename.append(".h5");
212✔
611
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
212✔
612
  }
613
}
218✔
614

615
// Write a standalone HDF5 source file (filetype "source") holding the given
// source bank.
//
// \param filename    Output path; a non-.h5 extension triggers a warning but
//                    an HDF5 file is written regardless.
// \param source_bank Sites to store.
// \param bank_index  Per-rank offsets into the global bank.
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is by all processes since slave
  // processes need to send source bank data to the master.
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // NOTE(review): on non-master ranks without PHDF5, file_id stays
  // unopened below — presumably write_source_bank ignores it on those
  // ranks and only ships data to the master; confirm.
  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
    write_attribute(file_id, "version", VERSION_STATEPOINT);
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}
651

652
// Write the source bank into an open HDF5 file/group as dataset
// "source_bank", converting from the in-memory SourceSite layout to the
// packed on-disk layout.
//
// \param group_id    Open HDF5 file or group to write into.
// \param source_bank Sites to store.
// \param bank_index  Per-rank offsets into the global bank.
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // Separate datatypes: in-memory layout vs. packed file layout
  hid_t membanktype = h5banktype(true);
  hid_t filebanktype = h5banktype(false);

#ifdef OPENMC_MPI
  // MPI build additionally passes the MPI datatype for inter-rank transfer
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype, mpi::source_site);
#else
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype);
#endif

  H5Tclose(membanktype);
  H5Tclose(filebanktype);
}
669

670
// Determine member names of a compound HDF5 datatype
671
std::string dtype_member_names(hid_t dtype_id)
40✔
672
{
673
  int nmembers = H5Tget_nmembers(dtype_id);
40✔
674
  std::string names;
40✔
675
  for (int i = 0; i < nmembers; i++) {
350✔
676
    char* name = H5Tget_member_name(dtype_id, i);
310✔
677
    names = names.append(name);
310✔
678
    H5free_memory(name);
310✔
679
    if (i < nmembers - 1)
310✔
680
      names += ", ";
310✔
681
  }
682
  return names;
40✔
683
}
×
684

685
void read_source_bank(
20✔
686
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
687
{
688
  bool legacy_particle_codes = true;
20✔
689
  if (attribute_exists(group_id, "version")) {
20✔
690
    array<int, 2> version;
18✔
691
    read_attribute(group_id, "version", version);
18✔
692
    if (version[0] > VERSION_STATEPOINT[0] ||
18!
693
        (version[0] == VERSION_STATEPOINT[0] && version[1] >= 2)) {
18!
694
      legacy_particle_codes = false;
695
    }
696
  }
697

698
  hid_t banktype = h5banktype(true);
20✔
699

700
  // Open the dataset
701
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
20✔
702

703
  // Make sure number of members matches
704
  hid_t dtype = H5Dget_type(dset);
20✔
705
  auto file_member_names = dtype_member_names(dtype);
20✔
706
  auto bank_member_names = dtype_member_names(banktype);
20✔
707
  if (file_member_names != bank_member_names) {
20✔
708
    fatal_error(fmt::format(
2✔
709
      "Source site attributes in file do not match what is "
710
      "expected for this version of OpenMC. File attributes = ({}). Expected "
711
      "attributes = ({})",
712
      file_member_names, bank_member_names));
713
  }
714

715
  hid_t dspace = H5Dget_space(dset);
18✔
716
  hsize_t n_sites;
18✔
717
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
18✔
718

719
  // Make sure vector is big enough in case where we're reading entire source on
720
  // each process
721
  if (!distribute)
18✔
722
    sites.resize(n_sites);
8✔
723

724
  hid_t memspace;
18✔
725
  if (distribute) {
18✔
726
    if (simulation::work_index[mpi::n_procs] > n_sites) {
10!
727
      fatal_error("Number of source sites in source file is less "
×
728
                  "than number of source particles per generation.");
729
    }
730

731
    // Create another data space but for each proc individually
732
    hsize_t n_sites_local = simulation::work_per_rank;
10✔
733
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
10✔
734

735
    // Select hyperslab for each process
736
    hsize_t offset = simulation::work_index[mpi::rank];
10✔
737
    H5Sselect_hyperslab(
10✔
738
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
739
  } else {
740
    memspace = H5S_ALL;
741
  }
742

743
#ifdef PHDF5
744
  // Read data in parallel
745
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
746
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
747
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
748
  H5Pclose(plist);
749
#else
750
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
18✔
751
#endif
752

753
  // Close all ids
754
  H5Sclose(dspace);
18✔
755
  if (distribute)
18✔
756
    H5Sclose(memspace);
10✔
757
  H5Dclose(dset);
18✔
758
  H5Tclose(banktype);
18✔
759

760
  if (legacy_particle_codes) {
18!
761
    for (auto& site : sites) {
×
762
      site.particle = legacy_particle_index_to_type(site.particle.pdg_number());
×
763
    }
764
  }
765
}
18✔
766

UNCOV
767
void write_unstructured_mesh_results()
×
768
{
769

UNCOV
770
  for (auto& tally : model::tallies) {
×
771

UNCOV
772
    vector<std::string> tally_scores;
×
UNCOV
773
    for (auto filter_idx : tally->filters()) {
×
UNCOV
774
      auto& filter = model::tally_filters[filter_idx];
×
UNCOV
775
      if (filter->type() != FilterType::MESH)
×
UNCOV
776
        continue;
×
777

778
      // check if the filter uses an unstructured mesh
UNCOV
779
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
×
UNCOV
780
      auto mesh_idx = mesh_filter->mesh();
×
UNCOV
781
      auto umesh =
×
UNCOV
782
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
×
783

UNCOV
784
      if (!umesh)
×
UNCOV
785
        continue;
×
786

UNCOV
787
      if (!umesh->output_)
×
788
        continue;
×
789

UNCOV
790
      if (umesh->library() == "moab") {
×
UNCOV
791
        if (mpi::master)
×
UNCOV
792
          warning(fmt::format(
×
793
            "Output for a MOAB mesh (mesh {}) was "
794
            "requested but will not be written. Please use the Python "
795
            "API to generated the desired VTK tetrahedral mesh.",
UNCOV
796
            umesh->id_));
×
UNCOV
797
        continue;
×
798
      }
799

800
      // if this tally has more than one filter, print
801
      // warning and skip writing the mesh
UNCOV
802
      if (tally->filters().size() > 1) {
×
803
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
804
                            "{}. More than one filter is present on the tally.",
805
          tally->id_));
×
806
        break;
×
807
      }
808

UNCOV
809
      int n_realizations = tally->n_realizations_;
×
810

UNCOV
811
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
×
UNCOV
812
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
×
813
          // combine the score and nuclide into a name for the value
UNCOV
814
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
×
UNCOV
815
            tally->nuclide_name(nuc_idx));
×
816
          // add this score to the mesh
817
          // (this is in a separate loop because all variables need to be added
818
          //  to libMesh's equation system before any are initialized, which
819
          //  happens in set_score_data)
UNCOV
820
          umesh->add_score(score_str);
×
UNCOV
821
        }
×
822
      }
823

UNCOV
824
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
×
UNCOV
825
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
×
826
          // combine the score and nuclide into a name for the value
UNCOV
827
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
×
UNCOV
828
            tally->nuclide_name(nuc_idx));
×
829

830
          // index for this nuclide and score
UNCOV
831
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
×
832

833
          // construct result vectors
UNCOV
834
          vector<double> mean_vec(umesh->n_bins()),
×
UNCOV
835
            std_dev_vec(umesh->n_bins());
×
UNCOV
836
          for (int j = 0; j < tally->results_.shape(0); j++) {
×
837
            // get the volume for this bin
UNCOV
838
            double volume = umesh->volume(j);
×
839
            // compute the mean
UNCOV
840
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
×
UNCOV
841
                          n_realizations;
×
UNCOV
842
            mean_vec.at(j) = mean / volume;
×
843

844
            // compute the standard deviation
UNCOV
845
            double sum_sq =
×
UNCOV
846
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
×
UNCOV
847
            double std_dev {0.0};
×
UNCOV
848
            if (n_realizations > 1) {
×
UNCOV
849
              std_dev = sum_sq / n_realizations - mean * mean;
×
UNCOV
850
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
×
851
            }
UNCOV
852
            std_dev_vec[j] = std_dev / volume;
×
853
          }
854
#ifdef OPENMC_MPI
855
          MPI_Bcast(
856
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
857
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
858
            mpi::intracomm);
859
#endif
860
          // set the data for this score
UNCOV
861
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
×
UNCOV
862
        }
×
863
      }
864

865
      // Generate a file name based on the tally id
866
      // and the current batch number
UNCOV
867
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
×
UNCOV
868
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
×
UNCOV
869
        simulation::current_batch, batch_width);
×
870

871
      // Write the unstructured mesh and data to file
UNCOV
872
      umesh->write(filename);
×
873

874
      // remove score data added for this mesh write
UNCOV
875
      umesh->remove_scores();
×
UNCOV
876
    }
×
UNCOV
877
  }
×
UNCOV
878
}
×
879

880
//! Reduce tally results across MPI ranks and write them to a statepoint
//! file without permanently aggregating on the master.
//!
//! Each rank holds partial SUM/SUM_SQ accumulators. They are summed onto
//! rank 0 for output only; the reduced values are copied back into the
//! in-memory tallies solely at the end of the run (final batch or satisfied
//! triggers), so intermediate statepoints don't disturb accumulation.
//!
//! \param file_id HDF5 handle of the open statepoint file
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group = -1; // only opened/used on the master rank
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies onto rank 0
  tensor::Tensor<double> gt_reduced({N_GLOBAL_TALLIES, 3});
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Keep the reduced values on master only at the end of the simulation;
  // otherwise each rank continues accumulating its own partial sums
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active or not writable
    if (!t->active_ || !t->writable_)
      continue;

    // Mark (once) that at least one tally is present in this file
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Copy the SUM and SUM_SQ columns from the tally results into a
    // contiguous array for MPI reduction
    const int r_start = static_cast<int>(TallyResult::SUM);
    const int r_end = static_cast<int>(TallyResult::SUM_SQ) + 1;
    const size_t r_count = r_end - r_start;
    const size_t ni = t->results_.shape(0);
    const size_t nj = t->results_.shape(1);
    tensor::Tensor<double> values({ni, nj, r_count});
    for (size_t i = 0; i < ni; i++)
      for (size_t j = 0; j < nj; j++)
        for (size_t r = 0; r < r_count; r++)
          values(i, j, r) = t->results_(i, j, r_start + r);

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the reduced results back
      // into the tally results array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        for (size_t i = 0; i < ni; i++)
          for (size_t j = 0; j < nj; j++)
            for (size_t r = 0; r < r_count; r++)
              t->results_(i, j, r_start + r) = values(i, j, r);
      }

      // Put reduced values into a full-sized copy for writing to HDF5
      tensor::Tensor<double> results_copy = tensor::zeros_like(t->results_);
      for (size_t i = 0; i < ni; i++)
        for (size_t j = 0; j < nj; j++)
          for (size_t r = 0; r < r_count; r++)
            results_copy(i, j, r_start + r) = values(i, j, r);

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(
        tally_group, shape[0], shape[1], shape[2], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    // BUG FIX: the "tallies_present" flag is written above as an HDF5
    // *attribute*, but this check previously used object_exists() and then
    // wrote a *dataset* of 0 — so files containing tallies ended up with both
    // an attribute=1 and a contradictory dataset=0. Check and write the
    // attribute consistently instead.
    if (!attribute_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_attribute(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}
993

994
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc