
openmc-dev / openmc / build 12776996362

14 Jan 2025 09:49PM UTC coverage: 84.938% (+0.2%) from 84.729%
Pull Request #3133: Kinetics parameters using Iterated Fission Probability (merge 0495246d9 into 549cc0973, via GitHub / web-flow)

318 of 330 new or added lines in 10 files covered (96.36%).
1658 existing lines in 66 files now uncovered.
50402 of 59340 relevant lines covered (84.94%).
33987813.96 hits per line.

Source file: /src/state_point.cpp (86.9% covered)
#include "openmc/state_point.h"

#include <algorithm>
#include <cstdint> // for int64_t
#include <string>

#include "xtensor/xbuilder.hpp" // for empty_like
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/eigenvalue.h"
#include "openmc/error.h"
#include "openmc/file_utils.h"
#include "openmc/hdf5_interface.h"
#include "openmc/mcpl_interface.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/derivative.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/filter_mesh.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"
#include "openmc/vector.h"

namespace openmc {
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume the user wants a default name of
  // the form output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has the .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension == "") {
    filename_.append(".h5");
  } else if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(LIBMESH) || defined(DAGMC)
  // Write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
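
// Usage sketch (illustrative, not part of this file): the C API above accepts
// an explicit name or nullptr arguments to get the defaults described in the
// comments, e.g.
//
//   bool write_source = false;
//   openmc_statepoint_write("custom.h5", &write_source); // no source bank
//   openmc_statepoint_write(nullptr, nullptr); // default name, source written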

void restart_set_keff()
{
  if (simulation::restart_batch > settings::n_inactive) {
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
      simulation::k_sum[0] += simulation::k_generation[i];
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
    }
    int n = settings::gen_per_batch * simulation::n_realizations;
    simulation::keff = simulation::k_sum[0] / n;
  } else {
    simulation::keff = simulation::k_generation.back();
  }
}
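
// Worked example (illustrative): restarting at batch 30 of an eigenvalue run
// with 20 inactive batches and one generation per batch sums
// k_generation[20..29]; with n = 1 * 10 realizations the restored keff is
// that sum divided by 10, i.e. the mean of the active-cycle estimates.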

void load_state_point()
{
  write_message(
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
  openmc_statepoint_load(settings::path_statepoint.c_str());
}

void statepoint_version_check(hid_t file_id)
{
  // Read revision number for state point file and make sure it matches with
  // current version
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);
  if (version_array != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}

extern "C" int openmc_statepoint_load(const char* filename)
{
  // Open file for reading
  hid_t file_id = file_open(filename, 'r', true);

  // Read filetype
  std::string word;
  read_attribute(file_id, "filetype", word);
  if (word != "statepoint") {
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
  }

  statepoint_version_check(file_id);

  // Read and overwrite random number seed
  int64_t seed;
  read_dataset(file_id, "seed", seed);
  openmc_set_seed(seed);

  // A state point generated from a CE run could mistakenly be loaded into an
  // MG run (or vice versa); check to prevent that.
  read_dataset(file_id, "energy_mode", word);
  if (word == "multi-group" && settings::run_CE) {
    fatal_error("State point file is from multigroup run but current run is "
                "continuous energy.");
  } else if (word == "continuous-energy" && !settings::run_CE) {
    fatal_error("State point file is from continuous-energy run but current "
                "run is multigroup!");
  }

  // Read and overwrite run information except number of batches
  read_dataset(file_id, "run_mode", word);
  if (word == "fixed source") {
    settings::run_mode = RunMode::FIXED_SOURCE;
  } else if (word == "eigenvalue") {
    settings::run_mode = RunMode::EIGENVALUE;
  }
  read_attribute(file_id, "photon_transport", settings::photon_transport);
  read_dataset(file_id, "n_particles", settings::n_particles);
  int temp;
  read_dataset(file_id, "n_batches", temp);

  // Take maximum of statepoint n_batches and input n_batches
  settings::n_batches = std::max(settings::n_batches, temp);

  // Read batch number to restart at
  read_dataset(file_id, "current_batch", simulation::restart_batch);

  if (simulation::restart_batch >= settings::n_max_batches) {
    warning(fmt::format(
      "The number of batches specified for simulation ({}) is smaller "
      "than or equal to the number of batches in the restart statepoint file "
      "({})",
      settings::n_max_batches, simulation::restart_batch));
  }

  // Logical flag for source present in statepoint file
  bool source_present;
  read_attribute(file_id, "source_present", source_present);

  // Read information specific to eigenvalue run
  if (settings::run_mode == RunMode::EIGENVALUE) {
    read_dataset(file_id, "n_inactive", temp);
    read_eigenvalue_hdf5(file_id);

    // Take maximum of statepoint n_inactive and input n_inactive
    settings::n_inactive = std::max(settings::n_inactive, temp);

    // Check to make sure source bank is present
    if (settings::path_sourcepoint == settings::path_statepoint &&
        !source_present) {
      fatal_error("Source bank must be contained in statepoint restart file");
    }
  }

  // Read number of realizations for global tallies
  read_dataset(file_id, "n_realizations", simulation::n_realizations);

  // Set k_sum, keff, and current_batch based on whether restart file is part
  // of active cycle or inactive cycle
  if (settings::run_mode == RunMode::EIGENVALUE) {
    restart_set_keff();
  }

  // Set current batch number
  simulation::current_batch = simulation::restart_batch;

  // Read tallies to master. If we are using Parallel HDF5, all processes
  // need to be included in the HDF5 calls.
#ifdef PHDF5
  if (true) {
#else
  if (mpi::master) {
#endif
    // Read global tally data
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
      false, simulation::global_tallies.data());

    // Check if tally results are present
    bool present;
    read_attribute(file_id, "tallies_present", present);

    // Read in sum and sum squared
    if (present) {
      hid_t tallies_group = open_group(file_id, "tallies");

      for (auto& tally : model::tallies) {
        // Read sum, sum_sq, and N for each bin
        std::string name = "tally " + std::to_string(tally->id_);
        hid_t tally_group = open_group(tallies_group, name.c_str());

        int internal = 0;
        if (attribute_exists(tally_group, "internal")) {
          read_attribute(tally_group, "internal", internal);
        }
        if (internal) {
          tally->writable_ = false;
        } else {
          auto& results = tally->results_;
          read_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
          close_group(tally_group);
        }
      }
      close_group(tallies_group);
    }
  }

  // Read source if in eigenvalue mode
  if (settings::run_mode == RunMode::EIGENVALUE) {

    // Check if source was written out separately
    if (!source_present) {

      // Close statepoint file
      file_close(file_id);

      // Write message
      write_message(
        "Loading source file " + settings::path_sourcepoint + "...", 5);

      // Open source file
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
    }

    // Read source
    read_source_bank(file_id, simulation::source_bank, true);
  }

  // Close file
  file_close(file_id);

  return 0;
}
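
// Restart sketch (illustrative): load_state_point() above is the normal entry
// point during a restart run, but the C API can also be called directly with
// a path to an existing statepoint, e.g.
//
//   openmc_statepoint_load("statepoint.0030.h5");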

hid_t h5banktype()
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  H5Tclose(postype);
  return banktype;
}
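
// Extension sketch (hypothetical): adding a field such as "parent_id" to the
// bank would mean adding the member to SourceSite, mirroring it here with
//
//   H5Tinsert(banktype, "parent_id", HOFFSET(SourceSite, parent_id),
//     H5T_NATIVE_INT64);
//
// and updating the Python readers and format docs listed in the comment above.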

void write_source_point(std::string filename, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index, bool use_mcpl)
{
  std::string ext = use_mcpl ? "mcpl" : "h5";
  write_message("Creating source file {}.{} with {} particles ...", filename,
    ext, source_bank.size(), 5);

  // Dispatch to appropriate function based on file type
  if (use_mcpl) {
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
  } else {
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
  }
}

void write_h5_source_point(const char* filename,
  gsl::span<SourceSite> source_bank, const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is made by all processes since
  // slave processes need to send source bank data to the master).
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension == "") {
    filename_.append(".h5");
  } else if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}

void write_source_bank(hid_t group_id, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t banktype = h5banktype();

  // Set total and individual process dataspace sizes for source bank
  int64_t dims_size = bank_index.back();
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];

#ifdef PHDF5
  // Set size of total dataspace for all procs and rank
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
    H5P_DEFAULT, H5P_DEFAULT);

  // Create another data space but for each proc individually
  hsize_t count[] {static_cast<hsize_t>(count_size)};
  hid_t memspace = H5Screate_simple(1, count, nullptr);

  // Select hyperslab for this dataspace
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

  // Set up the property list for parallel writing
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);

  // Write data to file in parallel
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());

  // Free resources
  H5Sclose(dspace);
  H5Sclose(memspace);
  H5Dclose(dset);
  H5Pclose(plist);

#else

  if (mpi::master) {
    // Create dataset big enough to hold all source sites
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Save source bank sites since the array is overwritten below
#ifdef OPENMC_MPI
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
#endif

    for (int i = 0; i < mpi::n_procs; ++i) {
      // Create memory space
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
      hid_t memspace = H5Screate_simple(1, count, nullptr);

#ifdef OPENMC_MPI
      // Receive source sites from other processes
      if (i > 0)
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
          mpi::intracomm, MPI_STATUS_IGNORE);
#endif

      // Select hyperslab for this dataspace
      dspace = H5Dget_space(dset);
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
      H5Sselect_hyperslab(
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

      // Write data to hyperslab
      H5Dwrite(
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());

      H5Sclose(memspace);
      H5Sclose(dspace);
    }

    // Close all ids
    H5Dclose(dset);

#ifdef OPENMC_MPI
    // Restore state of source bank
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
#endif
  } else {
#ifdef OPENMC_MPI
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
      mpi::intracomm);
#endif
  }
#endif

  H5Tclose(banktype);
}
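
// Illustration of bank_index (illustrative numbers): with three ranks holding
// 100, 120, and 80 sites, bank_index = {0, 100, 220, 300}; bank_index.back()
// sizes the 300-element dataset and rank i writes (or sends) the slice
// [bank_index[i], bank_index[i+1]).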

// Determine member names of a compound HDF5 datatype
std::string dtype_member_names(hid_t dtype_id)
{
  int nmembers = H5Tget_nmembers(dtype_id);
  std::string names;
  for (int i = 0; i < nmembers; i++) {
    char* name = H5Tget_member_name(dtype_id, i);
    names = names.append(name);
    H5free_memory(name);
    if (i < nmembers - 1)
      names += ", ";
  }
  return names;
}
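
// For the bank type built by h5banktype(), this yields the members in
// insertion order, i.e. "r, u, E, time, wgt, delayed_group, surf_id,
// particle"; read_source_bank() below compares the string for the file's
// datatype against the expected one to catch mismatched site layouts.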

void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype();

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure the vector is big enough in the case where we're reading the
  // entire source on each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}
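
// Usage note: distribute = true slices the file's source_bank across MPI
// ranks via simulation::work_index (the restart path above), while
// distribute = false resizes the vector and reads the entire bank on every
// process that calls it.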

void write_unstructured_mesh_results()
{

  for (auto& tally : model::tallies) {

    vector<std::string> tally_scores;
    for (auto filter_idx : tally->filters()) {
      auto& filter = model::tally_filters[filter_idx];
      if (filter->type() != FilterType::MESH)
        continue;

      // Check if the filter uses an unstructured mesh
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
      auto mesh_idx = mesh_filter->mesh();
      auto umesh =
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());

      if (!umesh)
        continue;

      if (!umesh->output_)
        continue;

      if (umesh->library() == "moab") {
        if (mpi::master)
          warning(fmt::format(
            "Output for a MOAB mesh (mesh {}) was "
            "requested but will not be written. Please use the Python "
            "API to generate the desired VTK tetrahedral mesh.",
            umesh->id_));
        continue;
      }

      // If this tally has more than one filter, print a
      // warning and skip writing the mesh
      if (tally->filters().size() > 1) {
        warning(fmt::format("Skipping unstructured mesh writing for tally "
                            "{}. More than one filter is present on the tally.",
          tally->id_));
        break;
      }

      int n_realizations = tally->n_realizations_;

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // Combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));
          // Add this score to the mesh
          // (this is in a separate loop because all variables need to be added
          //  to libMesh's equation system before any are initialized, which
          //  happens in set_score_data)
          umesh->add_score(score_str);
        }
      }

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // Combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));

          // Index for this nuclide and score
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();

          // Construct result vectors
          vector<double> mean_vec(umesh->n_bins()),
            std_dev_vec(umesh->n_bins());
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
            // Get the volume for this bin
            double volume = umesh->volume(j);
            // Compute the mean
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
                          n_realizations;
            mean_vec.at(j) = mean / volume;

            // Compute the standard deviation
            double sum_sq =
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
            double std_dev {0.0};
            if (n_realizations > 1) {
              std_dev = sum_sq / n_realizations - mean * mean;
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
            }
            std_dev_vec[j] = std_dev / volume;
          }
#ifdef OPENMC_MPI
          MPI_Bcast(
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
            mpi::intracomm);
#endif
          // Set the data for this score
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
        }
      }

      // Generate a file name based on the tally id
      // and the current batch number
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
        simulation::current_batch, batch_width);

      // Write the unstructured mesh and data to file
      umesh->write(filename);

      // Remove score data added for this mesh write
      umesh->remove_scores();
    }
  }
}
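
// The statistics above follow the usual sample estimates over n realizations:
// mean = sum / n and std_dev = sqrt((sum_sq / n - mean^2) / (n - 1)), with
// both divided by the element volume so results are reported per unit volume.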

void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer reduced values to the master's copy
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

} // namespace openmc