openmc-dev / openmc / build 9617074623
21 Jun 2024 04:53PM UTC / coverage: 84.715% (+0.03%) from 84.688%

Pull Request #3042: Rely on std::filesystem for file_utils
Merge 47e50a3a9 into 4bd0b09e6 (github / web-flow)

26 of 31 new or added lines in 5 files covered (83.87%)
921 existing lines in 34 files now uncovered
48900 of 57723 relevant lines covered (84.71%)
31496763.14 hits per line

Source File: /src/state_point.cpp (87.21% covered)

1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "xtensor/xbuilder.hpp" // for empty_like
8
#include "xtensor/xview.hpp"
9
#include <fmt/core.h>
10

11
#include "openmc/bank.h"
12
#include "openmc/capi.h"
13
#include "openmc/constants.h"
14
#include "openmc/eigenvalue.h"
15
#include "openmc/error.h"
16
#include "openmc/file_utils.h"
17
#include "openmc/hdf5_interface.h"
18
#include "openmc/mesh.h"
19
#include "openmc/message_passing.h"
20
#include "openmc/mgxs_interface.h"
21
#include "openmc/nuclide.h"
22
#include "openmc/output.h"
23
#include "openmc/settings.h"
24
#include "openmc/simulation.h"
25
#include "openmc/tallies/derivative.h"
26
#include "openmc/tallies/filter.h"
27
#include "openmc/tallies/filter_mesh.h"
28
#include "openmc/tallies/tally.h"
29
#include "openmc/timer.h"
30
#include "openmc/vector.h"
31

32
namespace openmc {
33

34
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
6,592✔
35
{
36
  simulation::time_statepoint.start();
6,592✔
37

38
  // If a nullptr is passed in, we assume that the user
39
  // wants a default name of the form output/statepoint.20.h5
40
  std::string filename_;
6,592✔
41
  if (filename) {
6,592✔
42
    filename_ = filename;
998✔
43
  } else {
44
    // Determine width for zero padding
45
    int w = std::to_string(settings::n_max_batches).size();
5,594✔
46

47
    // Set filename for state point
48
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
11,188✔
49
      simulation::current_batch, w);
5,594✔
50
  }
51

52
  // Warn if the file name does not have a .h5 extension
53
  const auto extension = get_file_extension(filename_);
6,592✔
54
  if (extension != "h5") {
6,592✔
55
    warning("openmc_statepoint_write was passed a file extension differing "
×
56
            "from .h5, but an hdf5 file will be written.");
57
  }
58

59
  // Determine whether or not to write the source bank
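  // (a null pointer means the caller did not specify, so we default to true)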
60
  bool write_source_ = write_source ? *write_source : true;
6,592✔
61

62
  // Write message
63
  write_message("Creating state point " + filename_ + "...", 5);
6,592✔
64

65
  hid_t file_id;
66
  if (mpi::master) {
6,592✔
67
    // Create statepoint file
68
    file_id = file_open(filename_, 'w');
5,749✔
69

70
    // Write file type
71
    write_attribute(file_id, "filetype", "statepoint");
5,749✔
72

73
    // Write revision number for state point file
74
    write_attribute(file_id, "version", VERSION_STATEPOINT);
5,749✔
75

76
    // Write OpenMC version
77
    write_attribute(file_id, "openmc_version", VERSION);
5,749✔
78
#ifdef GIT_SHA1
79
    write_attribute(file_id, "git_sha1", GIT_SHA1);
5,749✔
80
#endif
81

82
    // Write current date and time
83
    write_attribute(file_id, "date_and_time", time_stamp());
5,749✔
84

85
    // Write path to input
86
    write_attribute(file_id, "path", settings::path_input);
5,749✔
87

88
    // Write out random number seed
89
    write_dataset(file_id, "seed", openmc_get_seed());
5,749✔
90

91
    // Write run information
92
    write_dataset(file_id, "energy_mode",
5,749✔
93
      settings::run_CE ? "continuous-energy" : "multi-group");
94
    switch (settings::run_mode) {
5,749✔
95
    case RunMode::FIXED_SOURCE:
1,804✔
96
      write_dataset(file_id, "run_mode", "fixed source");
1,804✔
97
      break;
1,804✔
98
    case RunMode::EIGENVALUE:
3,945✔
99
      write_dataset(file_id, "run_mode", "eigenvalue");
3,945✔
100
      break;
3,945✔
101
    default:
×
102
      break;
×
103
    }
104
    write_attribute(file_id, "photon_transport", settings::photon_transport);
5,749✔
105
    write_dataset(file_id, "n_particles", settings::n_particles);
5,749✔
106
    write_dataset(file_id, "n_batches", settings::n_batches);
5,749✔
107

108
    // Write out current batch number
109
    write_dataset(file_id, "current_batch", simulation::current_batch);
5,749✔
110

111
    // Indicate whether source bank is stored in statepoint
112
    write_attribute(file_id, "source_present", write_source_);
5,749✔
113

114
    // Write out information for eigenvalue run
115
    if (settings::run_mode == RunMode::EIGENVALUE)
5,749✔
116
      write_eigenvalue_hdf5(file_id);
3,945✔
117

118
    hid_t tallies_group = create_group(file_id, "tallies");
5,749✔
119

120
    // Write meshes
121
    meshes_to_hdf5(tallies_group);
5,749✔
122

123
    // Write information for derivatives
124
    if (!model::tally_derivs.empty()) {
5,749✔
125
      hid_t derivs_group = create_group(tallies_group, "derivatives");
12✔
126
      for (const auto& deriv : model::tally_derivs) {
72✔
127
        hid_t deriv_group =
128
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
60✔
129
        write_dataset(deriv_group, "material", deriv.diff_material);
60✔
130
        if (deriv.variable == DerivativeVariable::DENSITY) {
60✔
131
          write_dataset(deriv_group, "independent variable", "density");
24✔
132
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
36✔
133
          write_dataset(deriv_group, "independent variable", "nuclide_density");
24✔
134
          write_dataset(
24✔
135
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
24✔
136
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
12✔
137
          write_dataset(deriv_group, "independent variable", "temperature");
12✔
138
        } else {
139
          fatal_error("Independent variable for derivative " +
×
140
                      std::to_string(deriv.id) +
×
141
                      " not defined in state_point.cpp");
142
        }
143
        close_group(deriv_group);
60✔
144
      }
145
      close_group(derivs_group);
12✔
146
    }
147

148
    // Write information for filters
149
    hid_t filters_group = create_group(tallies_group, "filters");
5,749✔
150
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
5,749✔
151
    if (!model::tally_filters.empty()) {
5,749✔
152
      // Write filter IDs
153
      vector<int32_t> filter_ids;
3,660✔
154
      filter_ids.reserve(model::tally_filters.size());
3,660✔
155
      for (const auto& filt : model::tally_filters)
12,664✔
156
        filter_ids.push_back(filt->id());
9,004✔
157
      write_attribute(filters_group, "ids", filter_ids);
3,660✔
158

159
      // Write info for each filter
160
      for (const auto& filt : model::tally_filters) {
12,664✔
161
        hid_t filter_group =
162
          create_group(filters_group, "filter " + std::to_string(filt->id()));
9,004✔
163
        filt->to_statepoint(filter_group);
9,004✔
164
        close_group(filter_group);
9,004✔
165
      }
166
    }
3,660✔
167
    close_group(filters_group);
5,749✔
168

169
    // Write information for tallies
170
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
5,749✔
171
    if (!model::tallies.empty()) {
5,749✔
172
      // Write tally IDs
173
      vector<int32_t> tally_ids;
4,092✔
174
      tally_ids.reserve(model::tallies.size());
4,092✔
175
      for (const auto& tally : model::tallies)
25,106✔
176
        tally_ids.push_back(tally->id_);
21,014✔
177
      write_attribute(tallies_group, "ids", tally_ids);
4,092✔
178

179
      // Write all tally information except results
180
      for (const auto& tally : model::tallies) {
25,106✔
181
        hid_t tally_group =
182
          create_group(tallies_group, "tally " + std::to_string(tally->id_));
21,014✔
183

184
        write_dataset(tally_group, "name", tally->name_);
21,014✔
185

186
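        // Tallies not marked writable are only flagged as "internal" here;
        // their results are not written to the statepoint.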
        if (tally->writable_) {
21,014✔
187
          write_attribute(tally_group, "internal", 0);
19,892✔
188
        } else {
189
          write_attribute(tally_group, "internal", 1);
1,122✔
190
          close_group(tally_group);
1,122✔
191
          continue;
1,122✔
192
        }
193

194
        if (tally->multiply_density()) {
19,892✔
195
          write_attribute(tally_group, "multiply_density", 1);
19,856✔
196
        } else {
197
          write_attribute(tally_group, "multiply_density", 0);
36✔
198
        }
199

200
        if (tally->estimator_ == TallyEstimator::ANALOG) {
19,892✔
201
          write_dataset(tally_group, "estimator", "analog");
7,617✔
202
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
12,275✔
203
          write_dataset(tally_group, "estimator", "tracklength");
11,599✔
204
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
676✔
205
          write_dataset(tally_group, "estimator", "collision");
676✔
206
        }
207

208
        write_dataset(tally_group, "n_realizations", tally->n_realizations_);
19,892✔
209

210
        // Write the ID of each filter attached to this tally
211
        write_dataset(tally_group, "n_filters", tally->filters().size());
19,892✔
212
        if (!tally->filters().empty()) {
19,892✔
213
          vector<int32_t> filter_ids;
18,884✔
214
          filter_ids.reserve(tally->filters().size());
18,884✔
215
          for (auto i_filt : tally->filters())
57,070✔
216
            filter_ids.push_back(model::tally_filters[i_filt]->id());
38,186✔
217
          write_dataset(tally_group, "filters", filter_ids);
18,884✔
218
        }
18,884✔
219

220
        // Write the nuclides this tally scores
221
        vector<std::string> nuclides;
19,892✔
222
        for (auto i_nuclide : tally->nuclides_) {
48,220✔
223
          if (i_nuclide == -1) {
28,328✔
224
            nuclides.push_back("total");
17,060✔
225
          } else {
226
            if (settings::run_CE) {
11,268✔
227
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
11,148✔
228
            } else {
229
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
120✔
230
            }
231
          }
232
        }
233
        write_dataset(tally_group, "nuclides", nuclides);
19,892✔
234

235
        if (tally->deriv_ != C_NONE)
19,892✔
236
          write_dataset(
240✔
237
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);
240✔
238

239
        // Write the tally score bins
240
        vector<std::string> scores;
19,892✔
241
        for (auto sc : tally->scores_)
47,840✔
242
          scores.push_back(reaction_name(sc));
27,948✔
243
        write_dataset(tally_group, "n_score_bins", scores.size());
19,892✔
244
        write_dataset(tally_group, "score_bins", scores);
19,892✔
245

246
        close_group(tally_group);
19,892✔
247
      }
19,892✔
248
    }
4,092✔
249

250
    if (settings::reduce_tallies) {
5,749✔
251
      // Write global tallies
252
      write_dataset(file_id, "global_tallies", simulation::global_tallies);
5,749✔
253

254
      // Write tallies
255
      if (model::active_tallies.size() > 0) {
5,749✔
256
        // Indicate that tallies are on
257
        write_attribute(file_id, "tallies_present", 1);
3,840✔
258

259
        // Write all tally results
260
        for (const auto& tally : model::tallies) {
24,602✔
261
          if (!tally->writable_)
20,762✔
262
            continue;
870✔
263
          // Write sum and sum_sq for each bin
264
          std::string name = "tally " + std::to_string(tally->id_);
19,892✔
265
          hid_t tally_group = open_group(tallies_group, name.c_str());
19,892✔
266
          auto& results = tally->results_;
19,892✔
267
          write_tally_results(tally_group, results.shape()[0],
19,892✔
268
            results.shape()[1], results.data());
19,892✔
269
          close_group(tally_group);
19,892✔
270
        }
19,892✔
271
      } else {
272
        // Indicate tallies are off
273
        write_attribute(file_id, "tallies_present", 0);
1,909✔
274
      }
275
    }
276

277
    close_group(tallies_group);
5,749✔
278
  }
279

280
  // Check for the no-tally-reduction method
281
  if (!settings::reduce_tallies) {
6,592✔
282
    // If using the no-tally-reduction method, we need to collect tally
283
    // results before writing them to the state point file.
284
    write_tally_results_nr(file_id);
×
285

286
  } else if (mpi::master) {
6,592✔
287
    // Write number of global realizations
288
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
5,749✔
289
  }
290

291
  if (mpi::master) {
6,592✔
292
    // Write out the runtime metrics.
293
    using namespace simulation;
294
    hid_t runtime_group = create_group(file_id, "runtime");
5,749✔
295
    write_dataset(
5,749✔
296
      runtime_group, "total initialization", time_initialize.elapsed());
297
    write_dataset(
5,749✔
298
      runtime_group, "reading cross sections", time_read_xs.elapsed());
299
    write_dataset(runtime_group, "simulation",
5,749✔
300
      time_inactive.elapsed() + time_active.elapsed());
5,749✔
301
    write_dataset(runtime_group, "transport", time_transport.elapsed());
5,749✔
302
    if (settings::run_mode == RunMode::EIGENVALUE) {
5,749✔
303
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
3,945✔
304
    }
305
    write_dataset(runtime_group, "active batches", time_active.elapsed());
5,749✔
306
    if (settings::run_mode == RunMode::EIGENVALUE) {
5,749✔
307
      write_dataset(
3,945✔
308
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
309
      write_dataset(
3,945✔
310
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
311
      write_dataset(
3,945✔
312
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
313
    }
314
    write_dataset(
5,749✔
315
      runtime_group, "accumulating tallies", time_tallies.elapsed());
316
    write_dataset(runtime_group, "total", time_total.elapsed());
5,749✔
317
    write_dataset(
5,749✔
318
      runtime_group, "writing statepoints", time_statepoint.elapsed());
319
    close_group(runtime_group);
5,749✔
320

321
    file_close(file_id);
5,749✔
322
  }
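  // With parallel HDF5 every rank participates in writing the source bank
  // below; otherwise only the master reopens the file and the other ranks
  // send it their source sites.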
323

324
#ifdef PHDF5
325
  bool parallel = true;
3,452✔
326
#else
327
  bool parallel = false;
3,140✔
328
#endif
329

330
  // Write the source bank if desired
331
  if (write_source_) {
6,592✔
332
    if (mpi::master || parallel)
3,830✔
333
      file_id = file_open(filename_, 'a', true);
3,830✔
334
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
3,830✔
335
    if (mpi::master || parallel)
3,830✔
336
      file_close(file_id);
3,830✔
337
  }
338

339
#if defined(LIBMESH) || defined(DAGMC)
340
  // write unstructured mesh tally files
341
  write_unstructured_mesh_results();
1,944✔
342
#endif
343

344
  simulation::time_statepoint.stop();
6,592✔
345

346
  return 0;
6,592✔
347
}
6,592✔
348

349
void restart_set_keff()
78✔
350
{
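  // When restarting within the active batches, rebuild the running sums of
  // k-effective and its square from the values already recorded in the
  // statepoint, then recompute the mean keff; otherwise keep the last
  // generation's estimate.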
351
  if (simulation::restart_batch > settings::n_inactive) {
78✔
352
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
381✔
353
      simulation::k_sum[0] += simulation::k_generation[i];
303✔
354
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
303✔
355
    }
356
    int n = settings::gen_per_batch * simulation::n_realizations;
78✔
357
    simulation::keff = simulation::k_sum[0] / n;
78✔
358
  } else {
359
    simulation::keff = simulation::k_generation.back();
×
360
  }
361
}
78✔
362

363
void load_state_point()
78✔
364
{
365
  write_message(
78✔
366
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
156✔
367
  openmc_statepoint_load(settings::path_statepoint.c_str());
78✔
368
}
78✔
369

370
void statepoint_version_check(hid_t file_id)
78✔
371
{
372
  // Read revision number for state point file and make sure it matches with
373
  // current version
374
  array<int, 2> version_array;
375
  read_attribute(file_id, "version", version_array);
78✔
376
  if (version_array != VERSION_STATEPOINT) {
78✔
377
    fatal_error(
×
378
      "State point version does not match current version in OpenMC.");
379
  }
380
}
78✔
381

382
extern "C" int openmc_statepoint_load(const char* filename)
78✔
383
{
384
  // Open file for reading
385
  hid_t file_id = file_open(filename, 'r', true);
78✔
386

387
  // Read filetype
388
  std::string word;
78✔
389
  read_attribute(file_id, "filetype", word);
78✔
390
  if (word != "statepoint") {
78✔
391
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
392
  }
393

394
  statepoint_version_check(file_id);
78✔
395

396
  // Read and overwrite random number seed
397
  int64_t seed;
398
  read_dataset(file_id, "seed", seed);
78✔
399
  openmc_set_seed(seed);
78✔
400

401
  // A state point generated from a CE run could be loaded into an MG run
402
  // (or vice versa); check for that and prevent it.
403
  read_dataset(file_id, "energy_mode", word);
78✔
404
  if (word == "multi-group" && settings::run_CE) {
78✔
405
    fatal_error("State point file is from multigroup run but current run is "
×
406
                "continous energy.");
407
  } else if (word == "continuous-energy" && !settings::run_CE) {
78✔
408
    fatal_error("State point file is from continuous-energy run but current "
×
409
                "run is multigroup!");
410
  }
411

412
  // Read and overwrite run information except number of batches
413
  read_dataset(file_id, "run_mode", word);
78✔
414
  if (word == "fixed source") {
78✔
415
    settings::run_mode = RunMode::FIXED_SOURCE;
×
416
  } else if (word == "eigenvalue") {
78✔
417
    settings::run_mode = RunMode::EIGENVALUE;
78✔
418
  }
419
  read_attribute(file_id, "photon_transport", settings::photon_transport);
78✔
420
  read_dataset(file_id, "n_particles", settings::n_particles);
78✔
421
  int temp;
422
  read_dataset(file_id, "n_batches", temp);
78✔
423

424
  // Take maximum of statepoint n_batches and input n_batches
425
  settings::n_batches = std::max(settings::n_batches, temp);
78✔
426

427
  // Read batch number to restart at
428
  read_dataset(file_id, "current_batch", simulation::restart_batch);
78✔
429

430
  if (simulation::restart_batch >= settings::n_max_batches) {
78✔
431
    warning(fmt::format(
24✔
432
      "The number of batches specified for simulation ({}) is smaller "
433
      "than or equal to the number of batches in the restart statepoint file "
434
      "({})",
435
      settings::n_max_batches, simulation::restart_batch));
436
  }
437

438
  // Logical flag for source present in statepoint file
439
  bool source_present;
440
  read_attribute(file_id, "source_present", source_present);
78✔
441

442
  // Read information specific to eigenvalue run
443
  if (settings::run_mode == RunMode::EIGENVALUE) {
78✔
444
    read_dataset(file_id, "n_inactive", temp);
78✔
445
    read_eigenvalue_hdf5(file_id);
78✔
446

447
    // Take maximum of statepoint n_inactive and input n_inactive
448
    settings::n_inactive = std::max(settings::n_inactive, temp);
78✔
449

450
    // Check to make sure source bank is present
451
    if (settings::path_sourcepoint == settings::path_statepoint &&
156✔
452
        !source_present) {
78✔
453
      fatal_error("Source bank must be contained in statepoint restart file");
×
454
    }
455
  }
456

457
  // Read number of realizations for global tallies
458
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
78✔
459

460
  // Set k_sum, keff, and current_batch based on whether restart file is part
461
  // of active cycle or inactive cycle
462
  if (settings::run_mode == RunMode::EIGENVALUE) {
78✔
463
    restart_set_keff();
78✔
464
  }
465

466
  // Set current batch number
467
  simulation::current_batch = simulation::restart_batch;
78✔
468

469
  // Read tallies to master. If we are using Parallel HDF5, all processes
470
  // need to be included in the HDF5 calls.
471
#ifdef PHDF5
472
  if (true) {
473
#else
474
  if (mpi::master) {
35✔
475
#endif
476
    // Read global tally data
477
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
78✔
478
      false, simulation::global_tallies.data());
78✔
479

480
    // Check if tally results are present
481
    bool present;
482
    read_attribute(file_id, "tallies_present", present);
78✔
483

484
    // Read in sum and sum squared
485
    if (present) {
78✔
486
      hid_t tallies_group = open_group(file_id, "tallies");
78✔
487

488
      for (auto& tally : model::tallies) {
289✔
489
        // Read sum, sum_sq, and N for each bin
490
        std::string name = "tally " + std::to_string(tally->id_);
211✔
491
        hid_t tally_group = open_group(tallies_group, name.c_str());
211✔
492

493
        int internal = 0;
211✔
494
        if (attribute_exists(tally_group, "internal")) {
211✔
495
          read_attribute(tally_group, "internal", internal);
211✔
496
        }
497
        if (internal) {
211✔
498
          tally->writable_ = false;
×
499
        } else {
500
          auto& results = tally->results_;
211✔
501
          read_tally_results(tally_group, results.shape()[0],
422✔
502
            results.shape()[1], results.data());
211✔
503
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
211✔
504
          close_group(tally_group);
211✔
505
        }
506
      }
211✔
507
      close_group(tallies_group);
78✔
508
    }
509
  }
510

511
  // Read source if in eigenvalue mode
512
  if (settings::run_mode == RunMode::EIGENVALUE) {
78✔
513

514
    // Check if source was written out separately
515
    if (!source_present) {
78✔
516

517
      // Close statepoint file
518
      file_close(file_id);
×
519

520
      // Write message
521
      write_message(
×
522
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
523

524
      // Open source file
525
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
526
    }
527

528
    // Read source
529
    read_source_bank(file_id, simulation::source_bank, true);
78✔
530
  }
531

532
  // Close file
533
  file_close(file_id);
78✔
534

535
  return 0;
78✔
536
}
78✔
537

538
hid_t h5banktype()
4,502✔
539
{
540
  // Create compound type for position
541
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
4,502✔
542
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
4,502✔
543
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
4,502✔
544
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);
4,502✔
545

546
  // Create bank datatype
547
  //
548
  // If you make changes to the compound datatype here, make sure you update:
549
  // - openmc/source.py
550
  // - openmc/statepoint.py
551
  // - docs/source/io_formats/statepoint.rst
552
  // - docs/source/io_formats/source.rst
553
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
4,502✔
554
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
4,502✔
555
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
4,502✔
556
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
4,502✔
557
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
4,502✔
558
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
4,502✔
559
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
4,502✔
560
    H5T_NATIVE_INT);
4,502✔
561
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
4,502✔
562
  H5Tinsert(
4,502✔
563
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);
4,502✔
564

565
  H5Tclose(postype);
4,502✔
566
  return banktype;
4,502✔
567
}
568

569
void write_source_point(const char* filename, gsl::span<SourceSite> source_bank,
514✔
570
  const vector<int64_t>& bank_index)
571
{
572
  // When using parallel HDF5, the file is written to collectively by all
573
  // processes. With MPI-only, the file is opened and written by the master
574
  // (note that the call to write_source_bank is by all processes since slave
575
  // processes need to send source bank data to the master).
576
#ifdef PHDF5
577
  bool parallel = true;
267✔
578
#else
579
  bool parallel = false;
247✔
580
#endif
581

582
  if (!filename)
514✔
583
    fatal_error("write_source_point filename needs a nonempty name.");
×
584

585
  std::string filename_(filename);
514✔
586
  const auto extension = get_file_extension(filename_);
514✔
587
  if (extension != "h5") {
514✔
588
    warning("write_source_point was passed a file extension differing "
×
589
            "from .h5, but an hdf5 file will be written.");
590
  }
591

592
  hid_t file_id;
593
  if (mpi::master || parallel) {
514✔
594
    file_id = file_open(filename_.c_str(), 'w', true);
514✔
595
    write_attribute(file_id, "filetype", "source");
514✔
596
  }
597

598
  // Get pointer to source bank and write to file
599
  write_source_bank(file_id, source_bank, bank_index);
514✔
600

601
  if (mpi::master || parallel)
514✔
602
    file_close(file_id);
514✔
603
}
514✔
604

605
void write_source_bank(hid_t group_id, gsl::span<SourceSite> source_bank,
4,344✔
606
  const vector<int64_t>& bank_index)
607
{
608
  hid_t banktype = h5banktype();
4,344✔
609

610
  // Set total and individual process dataspace sizes for source bank
611
  int64_t dims_size = bank_index.back();
4,344✔
612
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];
4,344✔
613

614
#ifdef PHDF5
615
  // Set size of total dataspace for all procs and rank
616
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
2,358✔
617
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
2,358✔
618
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
2,358✔
619
    H5P_DEFAULT, H5P_DEFAULT);
620

621
  // Create another data space but for each proc individually
622
  hsize_t count[] {static_cast<hsize_t>(count_size)};
2,358✔
623
  hid_t memspace = H5Screate_simple(1, count, nullptr);
2,358✔
624

625
  // Select hyperslab for this dataspace
626
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
2,358✔
627
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);
2,358✔
628

629
  // Set up the property list for parallel writing
630
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
2,358✔
631
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
2,358✔
632

633
  // Write data to file in parallel
634
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());
2,358✔
635

636
  // Free resources
637
  H5Sclose(dspace);
2,358✔
638
  H5Sclose(memspace);
2,358✔
639
  H5Dclose(dset);
2,358✔
640
  H5Pclose(plist);
2,358✔
641

642
#else
643

644
  if (mpi::master) {
1,986✔
645
    // Create dataset big enough to hold all source sites
646
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
1,986✔
647
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
1,986✔
648
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
1,986✔
649
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
650

651
    // Save source bank sites since the array is overwritten below
652
#ifdef OPENMC_MPI
653
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
654
#endif
655

656
    for (int i = 0; i < mpi::n_procs; ++i) {
3,972✔
657
      // Create memory space
658
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
1,986✔
659
      hid_t memspace = H5Screate_simple(1, count, nullptr);
1,986✔
660

661
#ifdef OPENMC_MPI
662
      // Receive source sites from other processes
663
      if (i > 0)
664
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
665
          mpi::intracomm, MPI_STATUS_IGNORE);
666
#endif
667

668
      // Select hyperslab for this dataspace
669
      dspace = H5Dget_space(dset);
1,986✔
670
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
1,986✔
671
      H5Sselect_hyperslab(
1,986✔
672
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);
673

674
      // Write data to hyperslab
675
      H5Dwrite(
1,986✔
676
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());
1,986✔
677

678
      H5Sclose(memspace);
1,986✔
679
      H5Sclose(dspace);
1,986✔
680
    }
681

682
    // Close all ids
683
    H5Dclose(dset);
1,986✔
684

685
#ifdef OPENMC_MPI
686
    // Restore state of source bank
687
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
688
#endif
689
  } else {
690
#ifdef OPENMC_MPI
691
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
692
      mpi::intracomm);
693
#endif
694
  }
695
#endif
696

697
  H5Tclose(banktype);
4,344✔
698
}
4,344✔
699

700
// Determine member names of a compound HDF5 datatype
701
std::string dtype_member_names(hid_t dtype_id)
316✔
702
{
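  // Concatenate the member names into a single comma-separated string so that
  // two compound datatypes can be compared by name.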
703
  int nmembers = H5Tget_nmembers(dtype_id);
316✔
704
  std::string names;
316✔
705
  for (int i = 0; i < nmembers; i++) {
2,794✔
706
    char* name = H5Tget_member_name(dtype_id, i);
2,478✔
707
    names = names.append(name);
2,478✔
708
    H5free_memory(name);
2,478✔
709
    if (i < nmembers - 1)
2,478✔
710
      names += ", ";
2,162✔
711
  }
712
  return names;
316✔
713
}
×
714

715
void read_source_bank(
158✔
716
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
717
{
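  // When 'distribute' is true, each MPI rank reads only its own slice of the
  // source bank (selected below with a hyperslab); otherwise every rank reads
  // the entire bank.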
718
  hid_t banktype = h5banktype();
158✔
719

720
  // Open the dataset
721
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
158✔
722

723
  // Make sure number of members matches
724
  hid_t dtype = H5Dget_type(dset);
158✔
725
  auto file_member_names = dtype_member_names(dtype);
158✔
726
  auto bank_member_names = dtype_member_names(banktype);
158✔
727
  if (file_member_names != bank_member_names) {
158✔
728
    fatal_error(fmt::format(
20✔
729
      "Source site attributes in file do not match what is "
730
      "expected for this version of OpenMC. File attributes = ({}). Expected "
731
      "attributes = ({})",
732
      file_member_names, bank_member_names));
733
  }
734

735
  hid_t dspace = H5Dget_space(dset);
148✔
736
  hsize_t n_sites;
737
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
148✔
738

739
  // Make sure the vector is big enough in the case where we're reading the
740
  // entire source on each process
741
  if (!distribute)
148✔
742
    sites.resize(n_sites);
70✔
743

744
  hid_t memspace;
745
  if (distribute) {
148✔
746
    if (simulation::work_index[mpi::n_procs] > n_sites) {
78✔
747
      fatal_error("Number of source sites in source file is less "
×
748
                  "than number of source particles per generation.");
749
    }
750

751
    // Create another data space but for each proc individually
752
    hsize_t n_sites_local = simulation::work_per_rank;
78✔
753
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
78✔
754

755
    // Select hyperslab for each process
756
    hsize_t offset = simulation::work_index[mpi::rank];
78✔
757
    H5Sselect_hyperslab(
78✔
758
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
759
  } else {
760
    memspace = H5S_ALL;
70✔
761
  }
762

763
#ifdef PHDF5
764
  // Read data in parallel
765
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
78✔
766
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
78✔
767
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
78✔
768
  H5Pclose(plist);
78✔
769
#else
770
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
70✔
771
#endif
772

773
  // Close all ids
774
  H5Sclose(dspace);
148✔
775
  if (distribute)
148✔
776
    H5Sclose(memspace);
78✔
777
  H5Dclose(dset);
148✔
778
  H5Tclose(banktype);
148✔
779
}
148✔
780

781
void write_unstructured_mesh_results()
1,944✔
782
{
783

784
  for (auto& tally : model::tallies) {
10,125✔
785

786
    vector<std::string> tally_scores;
8,181✔
787
    for (auto filter_idx : tally->filters()) {
23,985✔
788
      auto& filter = model::tally_filters[filter_idx];
15,804✔
789
      if (filter->type() != FilterType::MESH)
15,804✔
790
        continue;
15,789✔
791

792
      // check if the filter uses an unstructured mesh
793
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
2,179✔
794
      auto mesh_idx = mesh_filter->mesh();
2,179✔
795
      auto umesh =
796
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
2,179✔
797

798
      if (!umesh)
2,179✔
799
        continue;
2,146✔
800

801
      if (!umesh->output_)
33✔
802
        continue;
×
803

804
      if (umesh->library() == "moab") {
33✔
805
        if (mpi::master)
18✔
806
          warning(fmt::format(
8✔
807
            "Output for a MOAB mesh (mesh {}) was "
808
            "requested but will not be written. Please use the Python "
809
            "API to generated the desired VTK tetrahedral mesh.",
810
            umesh->id_));
8✔
811
        continue;
18✔
812
      }
813

814
      // if this tally has more than one filter, print
815
      // warning and skip writing the mesh
816
      if (tally->filters().size() > 1) {
15✔
817
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
818
                            "{}. More than one filter is present on the tally.",
819
          tally->id_));
×
820
        break;
×
821
      }
822

823
      int n_realizations = tally->n_realizations_;
15✔
824

825
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
826
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
827
          // combine the score and nuclide into a name for the value
828
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30✔
829
            tally->nuclide_name(nuc_idx));
45✔
830
          // add this score to the mesh
831
          // (this is in a separate loop because all variables need to be added
832
          //  to libMesh's equation system before any are initialized, which
833
          //  happens in set_score_data)
834
          umesh->add_score(score_str);
15✔
835
        }
15✔
836
      }
837

838
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
839
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
840
          // combine the score and nuclide into a name for the value
841
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30✔
842
            tally->nuclide_name(nuc_idx));
45✔
843

844
          // index for this nuclide and score
845
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
15✔
846

847
          // construct result vectors
848
          vector<double> mean_vec(umesh->n_bins()),
15✔
849
            std_dev_vec(umesh->n_bins());
15✔
850
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
146,799✔
851
            // get the volume for this bin
852
            double volume = umesh->volume(j);
146,784✔
853
            // compute the mean
854
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
146,784✔
855
                          n_realizations;
146,784✔
856
            mean_vec.at(j) = mean / volume;
146,784✔
857

858
            // compute the standard deviation
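            // (i.e. the standard deviation of the mean, computed from the
            //  accumulated sum of squares over realizations)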
859
            double sum_sq =
860
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
146,784✔
861
            double std_dev {0.0};
146,784✔
862
            if (n_realizations > 1) {
146,784✔
863
              std_dev = sum_sq / n_realizations - mean * mean;
146,784✔
864
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
146,784✔
865
            }
866
            std_dev_vec[j] = std_dev / volume;
146,784✔
867
          }
868
#ifdef OPENMC_MPI
869
          MPI_Bcast(
10✔
870
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10✔
871
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10✔
872
            mpi::intracomm);
873
#endif
874
          // set the data for this score
875
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
15✔
876
        }
15✔
877
      }
878

879
      // Generate a file name based on the tally id
880
      // and the current batch number
881
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
15✔
882
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
15✔
883
        simulation::current_batch, batch_width);
15✔
884

885
      // Write the unstructured mesh and data to file
886
      umesh->write(filename);
15✔
887

888
      // remove score data added for this mesh write
889
      umesh->remove_scores();
15✔
890
    }
15✔
891
  }
8,181✔
892
}
1,944✔
893

894
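// In no-tally-reduction mode each rank holds partial sums; the reductions
// below combine them on the master, which then writes the results to the
// statepoint file. All ranks must call this routine.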
void write_tally_results_nr(hid_t file_id)
×
895
{
896
  // ==========================================================================
897
  // COLLECT AND WRITE GLOBAL TALLIES
898

899
  hid_t tallies_group;
900
  if (mpi::master) {
×
901
    // Write number of realizations
902
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
×
903

904
    tallies_group = open_group(file_id, "tallies");
×
905
  }
906

907
  // Get global tallies
908
  auto& gt = simulation::global_tallies;
×
909

910
#ifdef OPENMC_MPI
911
  // Reduce global tallies
912
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
913
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
914
    mpi::intracomm);
915

916
  // Transfer the reduced values back into the master's copy
917
  if (mpi::master) {
918
    if (simulation::current_batch == settings::n_max_batches ||
919
        simulation::satisfy_triggers) {
920
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
921
    }
922
  }
923
#endif
924

925
  // Write out global tallies sum and sum_sq
926
  if (mpi::master) {
×
927
    write_dataset(file_id, "global_tallies", gt);
×
928
  }
929

930
  for (const auto& t : model::tallies) {
×
931
    // Skip any tallies that are not active
932
    if (!t->active_)
×
933
      continue;
×
934
    if (!t->writable_)
×
935
      continue;
×
936

937
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
×
938
      write_attribute(file_id, "tallies_present", 1);
×
939
    }
940

941
    // Get view of accumulated tally values
942
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
×
943
      xt::range(static_cast<int>(TallyResult::SUM),
×
944
        static_cast<int>(TallyResult::SUM_SQ) + 1));
945

946
    // Make copy of tally values in contiguous array
947
    xt::xtensor<double, 3> values = values_view;
×
948

949
    if (mpi::master) {
×
950
      // Open group for tally
951
      std::string groupname {"tally " + std::to_string(t->id_)};
×
952
      hid_t tally_group = open_group(tallies_group, groupname.c_str());
×
953

954
      // The MPI_IN_PLACE specifier allows the master to copy values into
955
      // a receive buffer without having a temporary variable
956
#ifdef OPENMC_MPI
957
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
958
        MPI_SUM, 0, mpi::intracomm);
959
#endif
960

961
      // At the end of the simulation, store the results back in the
962
      // regular TallyResults array
963
      if (simulation::current_batch == settings::n_max_batches ||
×
964
          simulation::satisfy_triggers) {
965
        values_view = values;
×
966
      }
967

968
      // Put in temporary tally result
969
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
×
970
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
×
971
        xt::range(static_cast<int>(TallyResult::SUM),
×
972
          static_cast<int>(TallyResult::SUM_SQ) + 1));
973
      copy_view = values;
×
974

975
      // Write reduced tally results to file
976
      auto shape = results_copy.shape();
×
977
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());
×
978

979
      close_group(tally_group);
×
980
    } else {
×
981
      // Receive buffer not significant at other processors
982
#ifdef OPENMC_MPI
983
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
984
        mpi::intracomm);
985
#endif
986
    }
987
  }
988

989
  if (mpi::master) {
×
990
    if (!object_exists(file_id, "tallies_present")) {
×
991
      // Indicate that tallies are off
992
      write_dataset(file_id, "tallies_present", 0);
×
993
    }
994

995
    close_group(tallies_group);
×
996
  }
997
}
998

999
} // namespace openmc