openmc-dev / openmc · build 4812163747 (push, via GitHub) · pending completion
Merge pull request #2494 from aprilnovak/delete-tally

1 of 2 new or added lines in 1 file covered (50.0%)
124 existing lines in 19 files now uncovered
42440 of 50752 relevant lines covered (83.62%)
78971666.53 hits per line

Source file: /src/state_point.cpp (85.02% covered)

#include "openmc/state_point.h"

#include <algorithm>
#include <cstdint> // for int64_t
#include <string>

#include "xtensor/xbuilder.hpp" // for empty_like
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/eigenvalue.h"
#include "openmc/error.h"
#include "openmc/file_utils.h"
#include "openmc/hdf5_interface.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/derivative.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/filter_mesh.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"
#include "openmc/vector.h"

namespace openmc {

extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user wants a default name,
  // of the form output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // Ensure the file name has a .h5 extension
  const auto extension = get_file_extension(filename_);
  if (extension == "") {
    filename_.append(".h5");
  } else if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(LIBMESH) || defined(DAGMC)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}

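For context, openmc_statepoint_write is part of OpenMC's exported C API. A minimal driver sketch, assuming the public entry points openmc_init, openmc_run, and openmc_finalize from openmc/capi.h (the driver itself is hypothetical, not part of this file):

#include "openmc/capi.h"

int main(int argc, char* argv[])
{
  openmc_init(argc, argv, nullptr); // read inputs and set up problem data
  openmc_run();                     // run the transport simulation
  bool write_source = true;
  // A nullptr filename selects the default name, e.g. statepoint.<batch>.h5
  openmc_statepoint_write(nullptr, &write_source);
  openmc_finalize();
  return 0;
}
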
void restart_set_keff()
{
  if (simulation::restart_batch > settings::n_inactive) {
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
      simulation::k_sum[0] += simulation::k_generation[i];
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
    }
    int n = settings::gen_per_batch * simulation::n_realizations;
    simulation::keff = simulation::k_sum[0] / n;
  } else {
    simulation::keff = simulation::k_generation.back();
  }
}

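The branch above averages the k values accumulated past the inactive batches. A standalone sketch of the same arithmetic, with hypothetical numbers (gen_per_batch = 1, three active batches):

// k values for batches 0 through 4; batches 0 and 1 are inactive
double k_generation[] {1.10, 1.05, 1.02, 1.01, 1.00};
double k_sum = 0.0;
for (int i = 2; i < 5; ++i) // n_inactive = 2, restart_batch = 5
  k_sum += k_generation[i];
double keff = k_sum / 3; // n = gen_per_batch * n_realizations = 1 * 3
// keff = (1.02 + 1.01 + 1.00) / 3 = 1.01
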
void load_state_point()
{
  // Write message
  write_message("Loading state point " + settings::path_statepoint + "...", 5);

  // Open file for reading
  hid_t file_id = file_open(settings::path_statepoint.c_str(), 'r', true);

  // Read filetype
  std::string word;
  read_attribute(file_id, "filetype", word);
  if (word != "statepoint") {
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
  }

  // Read revision number for state point file and make sure it matches the
  // current version
  array<int, 2> array;
  read_attribute(file_id, "version", array);
  if (array != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }

  // Read and overwrite random number seed
  int64_t seed;
  read_dataset(file_id, "seed", seed);
  openmc_set_seed(seed);

  // A state point generated from a CE run could be loaded into an MG run
  // (or vice versa); check to prevent that.
  read_dataset(file_id, "energy_mode", word);
  if (word == "multi-group" && settings::run_CE) {
    fatal_error("State point file is from multigroup run but current run is "
                "continuous energy.");
  } else if (word == "continuous-energy" && !settings::run_CE) {
    fatal_error("State point file is from continuous-energy run but current "
                "run is multigroup!");
  }

  // Read and overwrite run information except number of batches
  read_dataset(file_id, "run_mode", word);
  if (word == "fixed source") {
    settings::run_mode = RunMode::FIXED_SOURCE;
  } else if (word == "eigenvalue") {
    settings::run_mode = RunMode::EIGENVALUE;
  }
  read_attribute(file_id, "photon_transport", settings::photon_transport);
  read_dataset(file_id, "n_particles", settings::n_particles);
  int temp;
  read_dataset(file_id, "n_batches", temp);

  // Take maximum of statepoint n_batches and input n_batches
  settings::n_batches = std::max(settings::n_batches, temp);

  // Read batch number to restart at
  read_dataset(file_id, "current_batch", simulation::restart_batch);

  if (simulation::restart_batch >= settings::n_max_batches) {
    fatal_error(fmt::format(
      "The number of batches specified for simulation ({}) is smaller"
      " than the number of batches in the restart statepoint file ({})",
      settings::n_max_batches, simulation::restart_batch));
  }

  // Logical flag for source present in statepoint file
  bool source_present;
  read_attribute(file_id, "source_present", source_present);

  // Read information specific to eigenvalue run
  if (settings::run_mode == RunMode::EIGENVALUE) {
    read_dataset(file_id, "n_inactive", temp);
    read_eigenvalue_hdf5(file_id);

    // Take maximum of statepoint n_inactive and input n_inactive
    settings::n_inactive = std::max(settings::n_inactive, temp);

    // Check to make sure source bank is present
    if (settings::path_sourcepoint == settings::path_statepoint &&
        !source_present) {
      fatal_error("Source bank must be contained in statepoint restart file");
    }
  }

  // Read number of realizations for global tallies
  read_dataset(file_id, "n_realizations", simulation::n_realizations);

  // Set k_sum, keff, and current_batch based on whether restart file is part
  // of active cycle or inactive cycle
  if (settings::run_mode == RunMode::EIGENVALUE) {
    restart_set_keff();
  }

  // Set current batch number
  simulation::current_batch = simulation::restart_batch;

  // Read tallies to master. If we are using Parallel HDF5, all processes
  // need to be included in the HDF5 calls.
#ifdef PHDF5
  if (true) {
#else
  if (mpi::master) {
#endif
    // Read global tally data
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
      false, simulation::global_tallies.data());

    // Check if tally results are present
    bool present;
    read_attribute(file_id, "tallies_present", present);

    // Read in sum and sum squared
    if (present) {
      hid_t tallies_group = open_group(file_id, "tallies");

      for (auto& tally : model::tallies) {
        // Read sum, sum_sq, and N for each bin
        std::string name = "tally " + std::to_string(tally->id_);
        hid_t tally_group = open_group(tallies_group, name.c_str());

        int internal = 0;
        if (attribute_exists(tally_group, "internal")) {
          read_attribute(tally_group, "internal", internal);
        }
        if (internal) {
          tally->writable_ = false;
        } else {
          auto& results = tally->results_;
          read_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
          close_group(tally_group);
        }
      }

      close_group(tallies_group);
    }
  }

  // Read source if in eigenvalue mode
  if (settings::run_mode == RunMode::EIGENVALUE) {

    // Check if source was written out separately
    if (!source_present) {

      // Close statepoint file
      file_close(file_id);

      // Write message
      write_message(
        "Loading source file " + settings::path_sourcepoint + "...", 5);

      // Open source file
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
    }

    // Read source
    read_source_bank(file_id, simulation::source_bank, true);
  }

  // Close file
  file_close(file_id);
}

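The open/read/close pattern used throughout load_state_point comes from OpenMC's thin HDF5 wrappers (file_open, read_attribute, read_dataset, file_close), the same helpers used earlier in this file. A minimal standalone sketch, with a hypothetical statepoint filename:

hid_t f = file_open("statepoint.20.h5", 'r', true);
std::string filetype;
read_attribute(f, "filetype", filetype); // expected to be "statepoint"
int64_t seed;
read_dataset(f, "seed", seed); // RNG seed recorded when the file was written
file_close(f);
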
hid_t h5banktype()
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  H5Tclose(postype);
  return banktype;
}

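As a quick illustration (not part of this file), the compound type returned by h5banktype() can be handed straight to the HDF5 C API to write a small in-memory bank; the file name below is hypothetical:

hid_t banktype = h5banktype();
vector<SourceSite> sites(3); // three default-constructed sites
hsize_t n = sites.size();
hid_t space = H5Screate_simple(1, &n, nullptr);
hid_t file =
  H5Fcreate("bank_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
hid_t dset = H5Dcreate(file, "source_bank", banktype, space, H5P_DEFAULT,
  H5P_DEFAULT, H5P_DEFAULT);
H5Dwrite(dset, banktype, H5S_ALL, H5S_ALL, H5P_DEFAULT, sites.data());
H5Dclose(dset);
H5Sclose(space);
H5Fclose(file);
H5Tclose(banktype);
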
void write_source_point(const char* filename, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is made by all processes since
  // slave processes need to send source bank data to the master).
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point requires a non-null filename.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension == "") {
    filename_.append(".h5");
  } else if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}

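The bank_index argument below is a cumulative offset table: entry i is where rank i's sites start in the global bank, and the last entry is the total number of sites. A hypothetical layout for three ranks with 100 sites each:

// bank_index = {0, 100, 200, 300}
// rank 0 writes sites [0, 100)   -> start = 0,   count = 100
// rank 1 writes sites [100, 200) -> start = 100, count = 100
// rank 2 writes sites [200, 300) -> start = 200, count = 100
// dims_size = bank_index.back() = 300 (length of the full dataset)
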
void write_source_bank(hid_t group_id, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t banktype = h5banktype();

  // Set total and individual process dataspace sizes for source bank
  int64_t dims_size = bank_index.back();
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];

#ifdef PHDF5
  // Set size of total dataspace for all procs and rank
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
    H5P_DEFAULT, H5P_DEFAULT);

  // Create another data space but for each proc individually
  hsize_t count[] {static_cast<hsize_t>(count_size)};
  hid_t memspace = H5Screate_simple(1, count, nullptr);

  // Select hyperslab for this dataspace
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

  // Set up the property list for parallel writing
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);

  // Write data to file in parallel
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());

  // Free resources
  H5Sclose(dspace);
  H5Sclose(memspace);
  H5Dclose(dset);
  H5Pclose(plist);

#else

  if (mpi::master) {
    // Create dataset big enough to hold all source sites
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Save source bank sites since the array is overwritten below
#ifdef OPENMC_MPI
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
#endif

    for (int i = 0; i < mpi::n_procs; ++i) {
      // Create memory space
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
      hid_t memspace = H5Screate_simple(1, count, nullptr);

#ifdef OPENMC_MPI
      // Receive source sites from other processes
      if (i > 0)
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
          mpi::intracomm, MPI_STATUS_IGNORE);
#endif

      // Select hyperslab for this dataspace
      dspace = H5Dget_space(dset);
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
      H5Sselect_hyperslab(
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

      // Write data to hyperslab
      H5Dwrite(
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());

      H5Sclose(memspace);
      H5Sclose(dspace);
    }

    // Close all ids
    H5Dclose(dset);

#ifdef OPENMC_MPI
    // Restore state of source bank
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
#endif
  } else {
#ifdef OPENMC_MPI
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
      mpi::intracomm);
#endif
  }
#endif

  H5Tclose(banktype);
}

// Determine member names of a compound HDF5 datatype
std::string dtype_member_names(hid_t dtype_id)
{
  int nmembers = H5Tget_nmembers(dtype_id);
  std::string names;
  for (int i = 0; i < nmembers; i++) {
    char* name = H5Tget_member_name(dtype_id, i);
    names = names.append(name);
    H5free_memory(name);
    if (i < nmembers - 1)
      names += ", ";
  }
  return names;
}

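For the compound type built by h5banktype() above, this helper produces the member list that read_source_bank compares against:

// dtype_member_names(h5banktype())
//   -> "r, u, E, time, wgt, delayed_group, surf_id, particle"
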
void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype();

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure the vector is big enough in the case where we're reading the
  // entire source on each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}

void write_unstructured_mesh_results()
{

  for (auto& tally : model::tallies) {

    vector<std::string> tally_scores;
    for (auto filter_idx : tally->filters()) {
      auto& filter = model::tally_filters[filter_idx];
      if (filter->type() != FilterType::MESH)
        continue;

      // check if the filter uses an unstructured mesh
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
      auto mesh_idx = mesh_filter->mesh();
      auto umesh =
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());

      if (!umesh)
        continue;

      if (!umesh->output_)
        continue;

      if (umesh->library() == "moab") {
        if (mpi::master)
          warning(fmt::format(
            "Output for a MOAB mesh (mesh {}) was "
            "requested but will not be written. Please use the Python "
            "API to generate the desired VTK tetrahedral mesh.",
            umesh->id_));
        continue;
      }

      // if this tally has more than one filter, print
      // warning and skip writing the mesh
      if (tally->filters().size() > 1) {
        warning(fmt::format("Skipping unstructured mesh writing for tally "
                            "{}. More than one filter is present on the tally.",
          tally->id_));
        break;
      }

      int n_realizations = tally->n_realizations_;

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));
          // add this score to the mesh
          // (this is in a separate loop because all variables need to be added
          //  to libMesh's equation system before any are initialized, which
          //  happens in set_score_data)
          umesh->add_score(score_str);
        }
      }

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));

          // index for this nuclide and score
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();

          // construct result vectors
          vector<double> mean_vec(umesh->n_bins()),
            std_dev_vec(umesh->n_bins());
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
            // get the volume for this bin
            double volume = umesh->volume(j);
            // compute the mean
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
                          n_realizations;
            mean_vec.at(j) = mean / volume;

            // compute the standard deviation
            double sum_sq =
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
            double std_dev {0.0};
            if (n_realizations > 1) {
              std_dev = sum_sq / n_realizations - mean * mean;
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
            }
            std_dev_vec[j] = std_dev / volume;
          }
#ifdef OPENMC_MPI
          MPI_Bcast(
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
            mpi::intracomm);
#endif
          // set the data for this score
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
        }
      }

      // Generate a file name based on the tally id
      // and the current batch number
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
        simulation::current_batch, batch_width);

      // Write the unstructured mesh and data to file
      umesh->write(filename);

      // remove score data added for this mesh write
      umesh->remove_scores();
    }
  }
}

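The statistics above are the standard sample mean and standard deviation of the mean over tally realizations. A standalone sketch with hypothetical numbers for one bin:

double n = 4.0, sum = 8.0, sum_sq = 17.0;          // four realizations
double mean = sum / n;                             // 2.0
double var = (sum_sq / n - mean * mean) / (n - 1); // (4.25 - 4.0) / 3
double std_dev = std::sqrt(var);                   // ~0.289
// Both mean and std_dev are then divided by the element volume.
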
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer reduced values back to the master copy
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer is not significant on other processes
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

} // namespace openmc
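
A note on the reduction in write_tally_results_nr above: MPI_IN_PLACE lets the root rank contribute its own buffer and receive the sum into it without allocating a temporary. A self-contained sketch of the same pattern (the function name here is hypothetical, not OpenMC's):

#include <mpi.h>
#include <vector>

void reduce_in_place(std::vector<double>& values, int rank, MPI_Comm comm)
{
  int n = static_cast<int>(values.size());
  if (rank == 0) {
    // Root passes MPI_IN_PLACE as the send buffer; the sum lands in values
    MPI_Reduce(MPI_IN_PLACE, values.data(), n, MPI_DOUBLE, MPI_SUM, 0, comm);
  } else {
    // Non-root ranks only send; the receive-buffer argument is ignored
    MPI_Reduce(values.data(), nullptr, n, MPI_DOUBLE, MPI_SUM, 0, comm);
  }
}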