• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 21034277178

15 Jan 2026 02:11PM UTC coverage: 81.871% (-0.2%) from 82.044%
21034277178

Pull #3702

github

web-flow
Merge d0e725296 into 179048b80
Pull Request #3702: Random Ray Kinetic Simulation Mode

17650 of 24558 branches covered (71.87%)

Branch coverage included in aggregate %.

1008 of 1149 new or added lines in 20 files covered. (87.73%)

166 existing lines in 15 files now uncovered.

56377 of 65861 relevant lines covered (85.6%)

47768466.83 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.87
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "xtensor/xbuilder.hpp" // for empty_like
8
#include "xtensor/xview.hpp"
9
#include <fmt/core.h>
10

11
#include "openmc/bank.h"
12
#include "openmc/bank_io.h"
13
#include "openmc/capi.h"
14
#include "openmc/constants.h"
15
#include "openmc/eigenvalue.h"
16
#include "openmc/error.h"
17
#include "openmc/file_utils.h"
18
#include "openmc/hdf5_interface.h"
19
#include "openmc/mcpl_interface.h"
20
#include "openmc/mesh.h"
21
#include "openmc/message_passing.h"
22
#include "openmc/mgxs_interface.h"
23
#include "openmc/nuclide.h"
24
#include "openmc/output.h"
25
#include "openmc/random_ray/random_ray_simulation.h"
26
#include "openmc/settings.h"
27
#include "openmc/simulation.h"
28
#include "openmc/tallies/derivative.h"
29
#include "openmc/tallies/filter.h"
30
#include "openmc/tallies/filter_mesh.h"
31
#include "openmc/tallies/tally.h"
32
#include "openmc/timer.h"
33
#include "openmc/vector.h"
34

35
namespace openmc {
36

37
//! Write a statepoint HDF5 file capturing the current simulation state.
//!
//! \param filename      Target path, or nullptr to generate a default name of
//!                      the form <path_output>/statepoint.<batch>.h5
//! \param write_source  Whether to include the source bank, or nullptr to
//!                      default to true
//! \return 0 on success (C API convention)
//!
//! Only the MPI master writes the file metadata/tallies; all ranks
//! participate in source-bank and no-tally-reduction collectives below.
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user
  // wants a default name for this, of the form like output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  // NOTE(review): on non-master ranks file_id stays uninitialized; presumably
  // write_tally_results_nr/write_source_bank only dereference it where valid
  // (master or parallel HDF5) — confirm.
  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    if (!settings::run_CE) {
      // Multigroup runs also record the group structure sizes
      write_dataset(file_id, "n_energy_groups", data::mg.num_energy_groups_);
      write_dataset(file_id, "n_delay_groups", data::mg.num_delayed_groups_);
    }
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      // Other run modes (e.g. plotting/volume) write no run_mode dataset
      break;
    }
    switch (settings::solver_type) {
    case SolverType::MONTE_CARLO:
      write_dataset(file_id, "solver_type", "monte carlo");
      break;
    case SolverType::RANDOM_RAY:
      write_dataset(file_id, "solver_type", "random ray");
      // Random ray runs store extra solver-specific data
      write_random_ray_hdf5(file_id);
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // NOTE(review): the ternary below is redundant — the flag is already bool
    write_dataset(file_id, "kinetic_simulation",
      settings::kinetic_simulation ? true : false);
    if (settings::kinetic_simulation) {
      // Kinetic (time-dependent) runs record where in time we are
      hid_t timestep_group = create_group(file_id, "timestep_data");
      write_dataset(timestep_group, "dt", settings::dt);
      write_dataset(
        timestep_group, "current_timestep", simulation::current_timestep);
      write_dataset(timestep_group, "current_time", simulation::current_time);
      close_group(timestep_group);
    }

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        // Internal (non-writable) tallies record only the flag and skip the
        // rest of the metadata
        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->higher_moments()) {
          write_attribute(tally_group, "higher_moments", 1);
        } else {
          write_attribute(tally_group, "higher_moments", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            // -1 is the sentinel for a "total" (all-nuclide) score
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;

          // Write results for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.shape()[2], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::solver_type == SolverType::RANDOM_RAY) {
      write_dataset(runtime_group, "source_update", time_update_src.elapsed());
      if (settings::kinetic_simulation) {
        write_dataset(
          runtime_group, "precursor_update", time_compute_precursors.elapsed());
      }
    }
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired. With parallel HDF5 every rank reopens
  // the file collectively; otherwise only the master touches the file and
  // other ranks ship their data inside write_source_bank.
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
394

395
void restart_set_keff()
50✔
396
{
397
  if (simulation::restart_batch > settings::n_inactive) {
50!
398
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
237✔
399
      simulation::k_sum[0] += simulation::k_generation[i];
187✔
400
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
187✔
401
    }
402
    int n = settings::gen_per_batch * simulation::n_realizations;
50✔
403
    simulation::keff = simulation::k_sum[0] / n;
50✔
404
  } else {
405
    simulation::keff = simulation::k_generation.back();
×
406
  }
407
}
50✔
408

409
void load_state_point()
50✔
410
{
411
  write_message(
50✔
412
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
93✔
413
  openmc_statepoint_load(settings::path_statepoint.c_str());
50✔
414
}
50✔
415

416
//! Verify that an open statepoint file's format revision matches this build.
//!
//! Reads the "version" attribute from the HDF5 file and aborts via
//! fatal_error() if it differs from VERSION_STATEPOINT.
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);

  const bool version_matches = (version_array == VERSION_STATEPOINT);
  if (!version_matches) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}
427

428
extern "C" int openmc_statepoint_load(const char* filename)
50✔
429
{
430
  // Open file for reading
431
  hid_t file_id = file_open(filename, 'r', true);
50✔
432

433
  // Read filetype
434
  std::string word;
50✔
435
  read_attribute(file_id, "filetype", word);
50✔
436
  if (word != "statepoint") {
50!
437
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
438
  }
439

440
  statepoint_version_check(file_id);
50✔
441

442
  // Read and overwrite random number seed
443
  int64_t seed;
444
  read_dataset(file_id, "seed", seed);
50✔
445
  openmc_set_seed(seed);
50✔
446

447
  // Read and overwrite random number stride
448
  uint64_t stride;
449
  read_dataset(file_id, "stride", stride);
50✔
450
  openmc_set_stride(stride);
50✔
451

452
  // It is not impossible for a state point to be generated from a CE run but
453
  // to be loaded in to an MG run (or vice versa), check to prevent that.
454
  read_dataset(file_id, "energy_mode", word);
50✔
455
  if (word == "multi-group" && settings::run_CE) {
50!
456
    fatal_error("State point file is from multigroup run but current run is "
×
457
                "continous energy.");
458
  } else if (word == "continuous-energy" && !settings::run_CE) {
50!
459
    fatal_error("State point file is from continuous-energy run but current "
×
460
                "run is multigroup!");
461
  }
462

463
  // Read and overwrite run information except number of batches
464
  read_dataset(file_id, "run_mode", word);
50✔
465
  if (word == "fixed source") {
50!
466
    settings::run_mode = RunMode::FIXED_SOURCE;
×
467
  } else if (word == "eigenvalue") {
50!
468
    settings::run_mode = RunMode::EIGENVALUE;
50✔
469
  }
470
  read_attribute(file_id, "photon_transport", settings::photon_transport);
50✔
471
  read_dataset(file_id, "n_particles", settings::n_particles);
50✔
472
  int temp;
473
  read_dataset(file_id, "n_batches", temp);
50✔
474

475
  // Take maximum of statepoint n_batches and input n_batches
476
  settings::n_batches = std::max(settings::n_batches, temp);
50✔
477

478
  // Read batch number to restart at
479
  read_dataset(file_id, "current_batch", simulation::restart_batch);
50✔
480

481
  if (settings::restart_run &&
50!
482
      simulation::restart_batch >= settings::n_max_batches) {
50✔
483
    warning(fmt::format(
8✔
484
      "The number of batches specified for simulation ({}) is smaller "
485
      "than or equal to the number of batches in the restart statepoint file "
486
      "({})",
487
      settings::n_max_batches, simulation::restart_batch));
488
  }
489

490
  // Logical flag for source present in statepoint file
491
  bool source_present;
492
  read_attribute(file_id, "source_present", source_present);
50✔
493

494
  // Read information specific to eigenvalue run
495
  if (settings::run_mode == RunMode::EIGENVALUE) {
50!
496
    read_dataset(file_id, "n_inactive", temp);
50✔
497
    read_eigenvalue_hdf5(file_id);
50✔
498

499
    // Take maximum of statepoint n_inactive and input n_inactive
500
    settings::n_inactive = std::max(settings::n_inactive, temp);
50✔
501

502
    // Check to make sure source bank is present
503
    if (settings::path_sourcepoint == settings::path_statepoint &&
100!
504
        !source_present) {
50!
505
      fatal_error("Source bank must be contained in statepoint restart file");
×
506
    }
507
  }
508

509
  // Read number of realizations for global tallies
510
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
50✔
511

512
  // Set k_sum, keff, and current_batch based on whether restart file is part
513
  // of active cycle or inactive cycle
514
  if (settings::run_mode == RunMode::EIGENVALUE) {
50!
515
    restart_set_keff();
50✔
516
  }
517

518
  // Set current batch number
519
  simulation::current_batch = simulation::restart_batch;
50✔
520

521
  // Read tallies to master. If we are using Parallel HDF5, all processes
522
  // need to be included in the HDF5 calls.
523
#ifdef PHDF5
524
  if (true) {
525
#else
526
  if (mpi::master) {
15!
527
#endif
528
    // Read global tally data
529
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
50✔
530
      false, simulation::global_tallies.data());
50✔
531

532
    // Check if tally results are present
533
    bool present;
534
    read_attribute(file_id, "tallies_present", present);
50✔
535

536
    // Read in sum and sum squared
537
    if (present) {
50!
538
      hid_t tallies_group = open_group(file_id, "tallies");
50✔
539

540
      for (auto& tally : model::tallies) {
169✔
541
        // Read sum, sum_sq, and N for each bin
542
        std::string name = "tally " + std::to_string(tally->id_);
119✔
543
        hid_t tally_group = open_group(tallies_group, name.c_str());
119✔
544

545
        int internal = 0;
119✔
546
        if (attribute_exists(tally_group, "internal")) {
119!
547
          read_attribute(tally_group, "internal", internal);
119✔
548
        }
549
        if (internal) {
119!
550
          tally->writable_ = false;
×
551
        } else {
552
          auto& results = tally->results_;
119✔
553
          read_tally_results(tally_group, results.shape()[0],
238✔
554
            results.shape()[1], results.shape()[2], results.data());
119✔
555

556
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
119✔
557
          close_group(tally_group);
119✔
558
        }
559
      }
119✔
560
      close_group(tallies_group);
50✔
561
    }
562
  }
563

564
  // Read source if in eigenvalue mode
565
  if (settings::run_mode == RunMode::EIGENVALUE) {
50!
566

567
    // Check if source was written out separately
568
    if (!source_present) {
50!
569

570
      // Close statepoint file
571
      file_close(file_id);
×
572

573
      // Write message
574
      write_message(
×
575
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
576

577
      // Open source file
578
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
579
    }
580

581
    // Read source
582
    read_source_bank(file_id, simulation::source_bank, true);
50✔
583
  }
584

585
  // Close file
586
  file_close(file_id);
50✔
587

588
  return 0;
50✔
589
}
50✔
590

591
//! Build the HDF5 compound datatype describing a SourceSite.
//!
//! \param memory If true, the compound is sized as the in-memory SourceSite
//!               struct; if false, it is sized for the on-disk record
//!               (2 Positions + 3 doubles + 3 ints), omitting trailing
//!               SourceSite fields not written to file.
//! \return A new compound datatype id; the caller must H5Tclose() it.
hid_t h5banktype(bool memory)
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  auto n = sizeof(SourceSite);
  if (!memory)
    n = 2 * sizeof(struct Position) + 3 * sizeof(double) + 3 * sizeof(int);
  hid_t banktype = H5Tcreate(H5T_COMPOUND, n);
  // Member offsets use the in-memory SourceSite layout in both cases;
  // NOTE(review): this assumes the written fields occupy the leading bytes of
  // SourceSite so they fit within the smaller file size n — confirm against
  // the SourceSite definition.
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The position subtype is no longer needed once inserted
  H5Tclose(postype);
  return banktype;
}
624

625
void write_source_point(std::string filename, span<SourceSite> source_bank,
934✔
626
  const vector<int64_t>& bank_index, bool use_mcpl)
627
{
628
  std::string ext = use_mcpl ? "mcpl" : "h5";
934✔
629
  write_message("Creating source file {}.{} with {} particles ...", filename,
934✔
630
    ext, source_bank.size(), 5);
934✔
631

632
  // Dispatch to appropriate function based on file type
633
  if (use_mcpl) {
934✔
634
    filename.append(".mcpl");
29✔
635
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
29✔
636
  } else {
637
    filename.append(".h5");
905✔
638
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
905✔
639
  }
640
}
934✔
641

642
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
905✔
643
  const vector<int64_t>& bank_index)
644
{
645
  // When using parallel HDF5, the file is written to collectively by all
646
  // processes. With MPI-only, the file is opened and written by the master
647
  // (note that the call to write_source_bank is by all processes since slave
648
  // processes need to send source bank data to the master.
649
#ifdef PHDF5
650
  bool parallel = true;
587✔
651
#else
652
  bool parallel = false;
318✔
653
#endif
654

655
  if (!filename)
905!
656
    fatal_error("write_source_point filename needs a nonempty name.");
×
657

658
  std::string filename_(filename);
905✔
659
  const auto extension = get_file_extension(filename_);
905✔
660
  if (extension != "h5") {
905!
661
    warning("write_source_point was passed a file extension differing "
×
662
            "from .h5, but an hdf5 file will be written.");
663
  }
664

665
  hid_t file_id;
666
  if (mpi::master || parallel) {
905!
667
    file_id = file_open(filename_.c_str(), 'w', true);
905✔
668
    write_attribute(file_id, "filetype", "source");
905✔
669
  }
670

671
  // Get pointer to source bank and write to file
672
  write_source_bank(file_id, source_bank, bank_index);
905✔
673

674
  if (mpi::master || parallel)
905!
675
    file_close(file_id);
905✔
676
}
905✔
677

678
//! Write a source bank dataset into an open HDF5 group.
//!
//! \param group_id    Open HDF5 group/file to receive the "source_bank"
//!                    dataset
//! \param source_bank Source sites to write
//! \param bank_index  Per-rank cumulative site counts
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // In-memory layout (full SourceSite) vs. trimmed on-disk layout
  hid_t membanktype = h5banktype(true);
  hid_t filebanktype = h5banktype(false);

#ifdef OPENMC_MPI
  // MPI builds also pass the MPI datatype used to ship sites between ranks
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype, mpi::source_site);
#else
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype);
#endif

  // Release the compound datatypes created above
  H5Tclose(membanktype);
  H5Tclose(filebanktype);
}
695

696
// Determine member names of a compound HDF5 datatype
697
std::string dtype_member_names(hid_t dtype_id)
198✔
698
{
699
  int nmembers = H5Tget_nmembers(dtype_id);
198✔
700
  std::string names;
198✔
701
  for (int i = 0; i < nmembers; i++) {
1,747✔
702
    char* name = H5Tget_member_name(dtype_id, i);
1,549✔
703
    names = names.append(name);
1,549✔
704
    H5free_memory(name);
1,549✔
705
    if (i < nmembers - 1)
1,549✔
706
      names += ", ";
1,351✔
707
  }
708
  return names;
198✔
709
}
×
710

711
//! Read a "source_bank" dataset from an open HDF5 group into a vector.
//!
//! \param group_id   Open HDF5 group/file containing "source_bank"
//! \param sites      Destination vector; resized to the full bank when
//!                   distribute is false, otherwise assumed pre-sized to this
//!                   rank's share
//! \param distribute If true, each MPI rank reads only its own hyperslab of
//!                   the bank (per simulation::work_index); if false, every
//!                   caller reads the entire bank
void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype(true);

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure vector is big enough in case where we're reading entire source on
  // each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    // The file must hold at least one site for every particle slot
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    // H5S_ALL: memory buffer maps one-to-one onto the file dataspace
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}
776

777
void write_unstructured_mesh_results()
2,082✔
778
{
779

780
  for (auto& tally : model::tallies) {
9,185✔
781

782
    vector<std::string> tally_scores;
7,103✔
783
    for (auto filter_idx : tally->filters()) {
20,918✔
784
      auto& filter = model::tally_filters[filter_idx];
13,815✔
785
      if (filter->type() != FilterType::MESH)
13,815!
786
        continue;
13,805✔
787

788
      // check if the filter uses an unstructured mesh
789
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
1,598!
790
      auto mesh_idx = mesh_filter->mesh();
1,598!
791
      auto umesh =
792
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
1,598!
793

794
      if (!umesh)
1,598✔
795
        continue;
1,568✔
796

797
      if (!umesh->output_)
30!
798
        continue;
×
799

800
      if (umesh->library() == "moab") {
30!
801
        if (mpi::master)
20✔
802
          warning(fmt::format(
10!
803
            "Output for a MOAB mesh (mesh {}) was "
804
            "requested but will not be written. Please use the Python "
805
            "API to generated the desired VTK tetrahedral mesh.",
806
            umesh->id_));
10✔
807
        continue;
20✔
808
      }
809

810
      // if this tally has more than one filter, print
811
      // warning and skip writing the mesh
812
      if (tally->filters().size() > 1) {
10!
813
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
814
                            "{}. More than one filter is present on the tally.",
815
          tally->id_));
×
816
        break;
×
817
      }
818

819
      int n_realizations = tally->n_realizations_;
10✔
820

821
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
20✔
822
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
20✔
823
          // combine the score and nuclide into a name for the value
824
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
20!
825
            tally->nuclide_name(nuc_idx));
20!
826
          // add this score to the mesh
827
          // (this is in a separate loop because all variables need to be added
828
          //  to libMesh's equation system before any are initialized, which
829
          //  happens in set_score_data)
830
          umesh->add_score(score_str);
10!
831
        }
10✔
832
      }
833

834
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
20✔
835
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
20✔
836
          // combine the score and nuclide into a name for the value
837
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
20!
838
            tally->nuclide_name(nuc_idx));
20!
839

840
          // index for this nuclide and score
841
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
10✔
842

843
          // construct result vectors
844
          vector<double> mean_vec(umesh->n_bins()),
10!
845
            std_dev_vec(umesh->n_bins());
10!
846
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
97,866✔
847
            // get the volume for this bin
848
            double volume = umesh->volume(j);
97,856!
849
            // compute the mean
850
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
97,856!
851
                          n_realizations;
97,856✔
852
            mean_vec.at(j) = mean / volume;
97,856!
853

854
            // compute the standard deviation
855
            double sum_sq =
856
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
97,856!
857
            double std_dev {0.0};
97,856✔
858
            if (n_realizations > 1) {
97,856!
859
              std_dev = sum_sq / n_realizations - mean * mean;
97,856✔
860
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
97,856✔
861
            }
862
            std_dev_vec[j] = std_dev / volume;
97,856✔
863
          }
864
#ifdef OPENMC_MPI
865
          MPI_Bcast(
10!
866
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10✔
867
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10!
868
            mpi::intracomm);
869
#endif
870
          // set the data for this score
871
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
10!
872
        }
10✔
873
      }
874

875
      // Generate a file name based on the tally id
876
      // and the current batch number
877
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
10!
878
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
10✔
879
        simulation::current_batch, batch_width);
×
880

881
      // Write the unstructured mesh and data to file
882
      umesh->write(filename);
10!
883

884
      // remove score data added for this mesh write
885
      umesh->remove_scores();
10!
886
    }
10✔
887
  }
7,103✔
888
}
2,082✔
889

890
//! Write tally results to a statepoint when tallies are not reduced across
//! MPI ranks (the "_nr" suffix; presumably the no-tally-reduction mode --
//! confirm against settings::reduce_tallies at the call site). Each rank
//! holds partial sums; this routine reduces them to the master, which writes
//! the combined results, and stores the reduced values back into the in-memory
//! tallies at the end of the simulation (or when triggers are satisfied).
//!
//! \param file_id  Open HDF5 file handle for the statepoint being written.
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  // Only valid/used on the master rank; non-master ranks never touch it.
  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies into a contiguous buffer on rank 0
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer values to value on master -- but only at the end of the run, so
  // intermediate statepoints do not clobber the per-rank partial sums
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  // NOTE: every rank must iterate the tallies in the same order so the
  // matching MPI_Reduce calls below pair up across ranks.
  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    // Record (once) that at least one tally is written, as an HDF5 attribute
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values (SUM and SUM_SQ columns only)
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array (xt::view is strided and
    // cannot be handed to MPI directly)
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result so the on-disk layout matches the full
      // TallyResult shape (VALUE column stays zero)
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(
        tally_group, shape[0], shape[1], shape[2], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    // NOTE(review): presence was recorded above as an HDF5 *attribute*, but
    // this guard uses object_exists, which checks for a named object (dataset/
    // group), not an attribute -- so a "tallies_present" dataset of 0 appears
    // to be writable even when the attribute is already 1. Confirm against the
    // statepoint reader whether this mismatch is intentional.
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

996
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc