openmc-dev / openmc / 14503480972

16 Apr 2025 10:04PM UTC coverage: 85.46% (+0.05%) from 85.414%

Pull Request #3363: Figure of Merit implementation
Merge 6bd6e420c into 47ca2916a (github / web-flow)

18 of 21 new or added lines in 1 file covered. (85.71%)

271 existing lines in 2 files now uncovered.

52516 of 61451 relevant lines covered (85.46%)

37218618.45 hits per line

Source File: /src/state_point.cpp (87.37% covered)
#include "openmc/state_point.h"

#include <algorithm>
#include <cstdint> // for int64_t
#include <string>

#include "xtensor/xbuilder.hpp" // for empty_like
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/eigenvalue.h"
#include "openmc/error.h"
#include "openmc/file_utils.h"
#include "openmc/hdf5_interface.h"
#include "openmc/mcpl_interface.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/derivative.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/filter_mesh.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"
#include "openmc/vector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

namespace openmc {

extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user wants a default
  // name of the form output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }
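  // (The format spec {1:0{2}} takes its field width from argument 2, so the
  // batch number is zero-padded to the width of n_max_batches; e.g. with
  // n_max_batches = 1000, batch 20 becomes "statepoint.0020.h5", keeping the
  // files in lexicographic order.)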

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }
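        // (A tally with writable_ == false is recorded only through the
        // "internal" attribute above; its remaining metadata and its results
        // are intentionally left out of the file, and readers skip it.)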

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    // Write out number of threads used
#ifdef _OPENMP
    write_dataset(runtime_group, "threads", omp_get_max_threads());
#else
    write_dataset(runtime_group, "threads",
      1); // Default to 1 thread if OpenMP is not available
#endif

    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(LIBMESH) || defined(DAGMC)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
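
// A minimal usage sketch (not part of this file): both arguments of
// openmc_statepoint_write() may be null, in which case a default, zero-padded
// file name is generated and the source bank is included by default:
//
//   openmc_statepoint_write(nullptr, nullptr);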

void restart_set_keff()
{
  if (simulation::restart_batch > settings::n_inactive) {
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
      simulation::k_sum[0] += simulation::k_generation[i];
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
    }
    int n = settings::gen_per_batch * simulation::n_realizations;
    simulation::keff = simulation::k_sum[0] / n;
  } else {
    simulation::keff = simulation::k_generation.back();
  }
}
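
// Note on restart_set_keff(): k_sum[0] re-accumulates the sum of k_generation
// over the active portion of the run and k_sum[1] the sum of its squares, so
// keff is restored as the active-batch mean k_sum[0] / n with
// n = gen_per_batch * n_realizations; the sum of squares feeds the
// uncertainty estimate computed elsewhere in the code.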

void load_state_point()
{
  write_message(
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
  openmc_statepoint_load(settings::path_statepoint.c_str());
}

void statepoint_version_check(hid_t file_id)
{
  // Read revision number for state point file and make sure it matches the
  // current version
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);
  if (version_array != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}
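
// (The "version" attribute is a two-element revision pair; any mismatch with
// VERSION_STATEPOINT is treated as fatal rather than attempting a migration
// between statepoint formats.)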

extern "C" int openmc_statepoint_load(const char* filename)
{
  // Open file for reading
  hid_t file_id = file_open(filename, 'r', true);

  // Read filetype
  std::string word;
  read_attribute(file_id, "filetype", word);
  if (word != "statepoint") {
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
  }

  statepoint_version_check(file_id);

  // Read and overwrite random number seed
  int64_t seed;
  read_dataset(file_id, "seed", seed);
  openmc_set_seed(seed);

  // Read and overwrite random number stride
  uint64_t stride;
  read_dataset(file_id, "stride", stride);
  openmc_set_stride(stride);

  // It is possible for a state point generated from a CE run to be loaded
  // into an MG run (or vice versa), so check to prevent that.
  read_dataset(file_id, "energy_mode", word);
  if (word == "multi-group" && settings::run_CE) {
    fatal_error("State point file is from multigroup run but current run is "
                "continuous energy.");
  } else if (word == "continuous-energy" && !settings::run_CE) {
    fatal_error("State point file is from continuous-energy run but current "
                "run is multigroup!");
  }

  // Read and overwrite run information except number of batches
  read_dataset(file_id, "run_mode", word);
  if (word == "fixed source") {
    settings::run_mode = RunMode::FIXED_SOURCE;
  } else if (word == "eigenvalue") {
    settings::run_mode = RunMode::EIGENVALUE;
  }
  read_attribute(file_id, "photon_transport", settings::photon_transport);
  read_dataset(file_id, "n_particles", settings::n_particles);
  int temp;
  read_dataset(file_id, "n_batches", temp);

  // Take maximum of statepoint n_batches and input n_batches
  settings::n_batches = std::max(settings::n_batches, temp);

  // Read batch number to restart at
  read_dataset(file_id, "current_batch", simulation::restart_batch);

  if (simulation::restart_batch >= settings::n_max_batches) {
    warning(fmt::format(
      "The number of batches specified for simulation ({}) is smaller "
      "than or equal to the number of batches in the restart statepoint file "
      "({})",
      settings::n_max_batches, simulation::restart_batch));
  }

  // Logical flag for source present in statepoint file
  bool source_present;
  read_attribute(file_id, "source_present", source_present);

  // Read information specific to eigenvalue run
  if (settings::run_mode == RunMode::EIGENVALUE) {
    read_dataset(file_id, "n_inactive", temp);
    read_eigenvalue_hdf5(file_id);

    // Take maximum of statepoint n_inactive and input n_inactive
    settings::n_inactive = std::max(settings::n_inactive, temp);

    // Check to make sure source bank is present
    if (settings::path_sourcepoint == settings::path_statepoint &&
        !source_present) {
      fatal_error("Source bank must be contained in statepoint restart file");
    }
  }

  // Read number of realizations for global tallies
  read_dataset(file_id, "n_realizations", simulation::n_realizations);

  // Set k_sum, keff, and current_batch based on whether restart file is part
  // of active cycle or inactive cycle
  if (settings::run_mode == RunMode::EIGENVALUE) {
    restart_set_keff();
  }

  // Set current batch number
  simulation::current_batch = simulation::restart_batch;

  // Read tallies to master. If we are using Parallel HDF5, all processes
  // need to be included in the HDF5 calls.
#ifdef PHDF5
  if (true) {
#else
  if (mpi::master) {
#endif
    // Read global tally data
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
      false, simulation::global_tallies.data());

    // Check if tally results are present
    bool present;
    read_attribute(file_id, "tallies_present", present);

    // Read in sum and sum squared
    if (present) {
      hid_t tallies_group = open_group(file_id, "tallies");

      for (auto& tally : model::tallies) {
        // Read sum, sum_sq, and N for each bin
        std::string name = "tally " + std::to_string(tally->id_);
        hid_t tally_group = open_group(tallies_group, name.c_str());

        int internal = 0;
        if (attribute_exists(tally_group, "internal")) {
          read_attribute(tally_group, "internal", internal);
        }
        if (internal) {
          tally->writable_ = false;
        } else {
          auto& results = tally->results_;
          read_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
          close_group(tally_group);
        }
      }
      close_group(tallies_group);
    }
  }

  // Read source if in eigenvalue mode
  if (settings::run_mode == RunMode::EIGENVALUE) {

    // Check if source was written out separately
    if (!source_present) {

      // Close statepoint file
      file_close(file_id);

      // Write message
      write_message(
        "Loading source file " + settings::path_sourcepoint + "...", 5);

      // Open source file
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
    }

    // Read source
    read_source_bank(file_id, simulation::source_bank, true);
  }

  // Close file
  file_close(file_id);

  return 0;
}
559
hid_t h5banktype()
4,429✔
560
{
561
  // Create compound type for position
562
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
4,429✔
563
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
4,429✔
564
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
4,429✔
565
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);
4,429✔
566

567
  // Create bank datatype
568
  //
569
  // If you make changes to the compound datatype here, make sure you update:
570
  // - openmc/source.py
571
  // - openmc/statepoint.py
572
  // - docs/source/io_formats/statepoint.rst
573
  // - docs/source/io_formats/source.rst
574
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
4,429✔
575
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
4,429✔
576
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
4,429✔
577
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
4,429✔
578
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
4,429✔
579
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
4,429✔
580
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
4,429✔
581
    H5T_NATIVE_INT);
4,429✔
582
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
4,429✔
583
  H5Tinsert(
4,429✔
584
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);
4,429✔
585

586
  H5Tclose(postype);
4,429✔
587
  return banktype;
4,429✔
588
}
589
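
// For reference, the compound type built by h5banktype() serializes each
// SourceSite as a record with members r (x, y, z), u (x, y, z), E, time, wgt,
// delayed_group, surf_id, and particle; the Python-side readers listed in the
// comment above must agree with this layout member-for-member.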

void write_source_point(std::string filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index, bool use_mcpl)
{
  std::string ext = use_mcpl ? "mcpl" : "h5";
  write_message("Creating source file {}.{} with {} particles ...", filename,
    ext, source_bank.size(), 5);

  // Dispatch to appropriate function based on file type
  if (use_mcpl) {
    filename.append(".mcpl");
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
  } else {
    filename.append(".h5");
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
  }
}
607
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
817✔
608
  const vector<int64_t>& bank_index)
609
{
610
  // When using parallel HDF5, the file is written to collectively by all
611
  // processes. With MPI-only, the file is opened and written by the master
612
  // (note that the call to write_source_bank is by all processes since slave
613
  // processes need to send source bank data to the master.
614
#ifdef PHDF5
615
  bool parallel = true;
422✔
616
#else
617
  bool parallel = false;
395✔
618
#endif
619

620
  if (!filename)
817✔
UNCOV
621
    fatal_error("write_source_point filename needs a nonempty name.");
×
622

623
  std::string filename_(filename);
817✔
624
  const auto extension = get_file_extension(filename_);
817✔
625
  if (extension != "h5") {
817✔
UNCOV
626
    warning("write_source_point was passed a file extension differing "
×
627
            "from .h5, but an hdf5 file will be written.");
628
  }
629

630
  hid_t file_id;
631
  if (mpi::master || parallel) {
817✔
632
    file_id = file_open(filename_.c_str(), 'w', true);
817✔
633
    write_attribute(file_id, "filetype", "source");
817✔
634
  }
635

636
  // Get pointer to source bank and write to file
637
  write_source_bank(file_id, source_bank, bank_index);
817✔
638

639
  if (mpi::master || parallel)
817✔
640
    file_close(file_id);
817✔
641
}
817✔
642

643
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
4,290✔
644
  const vector<int64_t>& bank_index)
645
{
646
  hid_t banktype = h5banktype();
4,290✔
647

648
  // Set total and individual process dataspace sizes for source bank
649
  int64_t dims_size = bank_index.back();
4,290✔
650
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];
4,290✔
651

652
#ifdef PHDF5
653
  // Set size of total dataspace for all procs and rank
654
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
2,371✔
655
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
2,371✔
656
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
2,371✔
657
    H5P_DEFAULT, H5P_DEFAULT);
658

659
  // Create another data space but for each proc individually
660
  hsize_t count[] {static_cast<hsize_t>(count_size)};
2,371✔
661
  hid_t memspace = H5Screate_simple(1, count, nullptr);
2,371✔
662

663
  // Select hyperslab for this dataspace
664
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
2,371✔
665
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);
2,371✔
666

667
  // Set up the property list for parallel writing
668
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
2,371✔
669
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
2,371✔
670

671
  // Write data to file in parallel
672
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());
2,371✔
673

674
  // Free resources
675
  H5Sclose(dspace);
2,371✔
676
  H5Sclose(memspace);
2,371✔
677
  H5Dclose(dset);
2,371✔
678
  H5Pclose(plist);
2,371✔
679

680
#else
681

682
  if (mpi::master) {
1,919✔
683
    // Create dataset big enough to hold all source sites
684
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
1,919✔
685
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
1,919✔
686
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
1,919✔
687
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
688

689
    // Save source bank sites since the array is overwritten below
690
#ifdef OPENMC_MPI
691
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
692
#endif
693

694
    for (int i = 0; i < mpi::n_procs; ++i) {
3,838✔
695
      // Create memory space
696
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
1,919✔
697
      hid_t memspace = H5Screate_simple(1, count, nullptr);
1,919✔
698

699
#ifdef OPENMC_MPI
700
      // Receive source sites from other processes
701
      if (i > 0)
702
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
703
          mpi::intracomm, MPI_STATUS_IGNORE);
704
#endif
705

706
      // Select hyperslab for this dataspace
707
      dspace = H5Dget_space(dset);
1,919✔
708
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
1,919✔
709
      H5Sselect_hyperslab(
1,919✔
710
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);
711

712
      // Write data to hyperslab
713
      H5Dwrite(
1,919✔
714
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());
1,919✔
715

716
      H5Sclose(memspace);
1,919✔
717
      H5Sclose(dspace);
1,919✔
718
    }
719

720
    // Close all ids
721
    H5Dclose(dset);
1,919✔
722

723
#ifdef OPENMC_MPI
724
    // Restore state of source bank
725
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
726
#endif
727
  } else {
728
#ifdef OPENMC_MPI
729
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
730
      mpi::intracomm);
731
#endif
732
  }
733
#endif
734

735
  H5Tclose(banktype);
4,290✔
736
}
4,290✔
737
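
// Note on bank_index above: it is a cumulative-offset array of length
// n_procs + 1, so bank_index[i] is where rank i's sites start in the global
// bank, bank_index[i + 1] - bank_index[i] is rank i's count, and
// bank_index.back() is the total number of sites.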

// Determine member names of a compound HDF5 datatype
std::string dtype_member_names(hid_t dtype_id)
{
  int nmembers = H5Tget_nmembers(dtype_id);
  std::string names;
  for (int i = 0; i < nmembers; i++) {
    char* name = H5Tget_member_name(dtype_id, i);
    names = names.append(name);
    H5free_memory(name);
    if (i < nmembers - 1)
      names += ", ";
  }
  return names;
}
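
// (Because dtype_member_names() joins the names into a single string, the
// comparison below in read_source_bank is sensitive to both the set and the
// order of members; a reordered but otherwise identical datatype is
// rejected.)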

void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype();

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure the vector is big enough in the case where we're reading the
  // entire source on each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}
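
// Note: read_source_bank() has two modes. With distribute == true each rank
// reads only its own slice of the dataset, selected by the work_index
// hyperslab above; with distribute == false the sites vector is resized and
// every caller reads the entire source bank.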

void write_unstructured_mesh_results()
{
  for (auto& tally : model::tallies) {

    vector<std::string> tally_scores;
    for (auto filter_idx : tally->filters()) {
      auto& filter = model::tally_filters[filter_idx];
      if (filter->type() != FilterType::MESH)
        continue;

      // check if the filter uses an unstructured mesh
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
      auto mesh_idx = mesh_filter->mesh();
      auto umesh =
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());

      if (!umesh)
        continue;

      if (!umesh->output_)
        continue;

      if (umesh->library() == "moab") {
        if (mpi::master)
          warning(fmt::format(
            "Output for a MOAB mesh (mesh {}) was "
            "requested but will not be written. Please use the Python "
            "API to generate the desired VTK tetrahedral mesh.",
            umesh->id_));
        continue;
      }

      // if this tally has more than one filter, print
      // warning and skip writing the mesh
      if (tally->filters().size() > 1) {
        warning(fmt::format("Skipping unstructured mesh writing for tally "
                            "{}. More than one filter is present on the tally.",
          tally->id_));
        break;
      }

      int n_realizations = tally->n_realizations_;

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));
          // add this score to the mesh
          // (this is in a separate loop because all variables need to be added
          //  to libMesh's equation system before any are initialized, which
          //  happens in set_score_data)
          umesh->add_score(score_str);
        }
      }

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));

          // index for this nuclide and score
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();

          // construct result vectors
          vector<double> mean_vec(umesh->n_bins()),
            std_dev_vec(umesh->n_bins());
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
            // get the volume for this bin
            double volume = umesh->volume(j);
            // compute the mean
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
                          n_realizations;
            mean_vec.at(j) = mean / volume;

            // compute the standard deviation
            double sum_sq =
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
            double std_dev {0.0};
            if (n_realizations > 1) {
              std_dev = sum_sq / n_realizations - mean * mean;
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
            }
            std_dev_vec[j] = std_dev / volume;
          }
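          // (This is the usual sample estimate of the standard deviation of
          // the mean, s = sqrt((sum_sq / n - mean * mean) / (n - 1)) with
          // n = n_realizations; both mean and standard deviation are divided
          // by the element volume so results are reported per unit volume.)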
#ifdef OPENMC_MPI
          MPI_Bcast(
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
            mpi::intracomm);
#endif
          // set the data for this score
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
        }
      }

      // Generate a file name based on the tally id
      // and the current batch number
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
        simulation::current_batch, batch_width);

      // Write the unstructured mesh and data to file
      umesh->write(filename);

      // remove score data added for this mesh write
      umesh->remove_scores();
    }
  }
}

void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer the reduced values back to the master's copy
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());

      close_group(tally_group);
    } else {
      // The receive buffer is not significant on other processes
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

} // namespace openmc