• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 21979737004

13 Feb 2026 08:19AM UTC coverage: 81.451% (-0.4%) from 81.851%
21979737004

Pull #3801

github

web-flow
Merge a4595d510 into bcb939520
Pull Request #3801: avoid need to set particles and batches for dagmc models when calling convert_to_multigroup

16960 of 23616 branches covered (71.82%)

Branch coverage included in aggregate %.

0 of 1 new or added line in 1 file covered. (0.0%)

908 existing lines in 38 files now uncovered.

55950 of 65898 relevant lines covered (84.9%)

36199990.53 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.02
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "xtensor/xbuilder.hpp" // for empty_like
8
#include "xtensor/xview.hpp"
9
#include <fmt/core.h>
10

11
#include "openmc/bank.h"
12
#include "openmc/bank_io.h"
13
#include "openmc/capi.h"
14
#include "openmc/constants.h"
15
#include "openmc/eigenvalue.h"
16
#include "openmc/error.h"
17
#include "openmc/file_utils.h"
18
#include "openmc/hdf5_interface.h"
19
#include "openmc/mcpl_interface.h"
20
#include "openmc/mesh.h"
21
#include "openmc/message_passing.h"
22
#include "openmc/mgxs_interface.h"
23
#include "openmc/nuclide.h"
24
#include "openmc/output.h"
25
#include "openmc/particle_type.h"
26
#include "openmc/settings.h"
27
#include "openmc/simulation.h"
28
#include "openmc/tallies/derivative.h"
29
#include "openmc/tallies/filter.h"
30
#include "openmc/tallies/filter_mesh.h"
31
#include "openmc/tallies/tally.h"
32
#include "openmc/timer.h"
33
#include "openmc/vector.h"
34

35
namespace openmc {
36

37
//! Write a statepoint (HDF5) file capturing the current simulation state.
//!
//! \param filename      Desired path, or nullptr to generate a default name
//!                      of the form "<path_output>statepoint.<batch>.h5"
//!                      (batch number zero-padded to the width of
//!                      n_max_batches).
//! \param write_source  If non-null, controls whether the source bank is
//!                      appended to the file; nullptr means "write it".
//! \return 0 (this function has no failing return paths; errors go through
//!         warning()/fatal_error())
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user
  // wants a default name for this, of the form like output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  // NOTE(review): file_id is only assigned on the master rank below; in
  // non-master ranks it is passed uninitialized to write_tally_results_nr /
  // write_source_bank — presumably unused there in MPI-only builds; confirm.
  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      // Other run modes (e.g. plotting) write no "run_mode" dataset
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        // Internal (non-writable) tallies only record the "internal" flag;
        // all other metadata and results are skipped for them.
        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->higher_moments()) {
          write_attribute(tally_group, "higher_moments", 1);
        } else {
          write_attribute(tally_group, "higher_moments", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores; index -1 denotes the "total"
        // pseudo-nuclide
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;

          // Write results for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.shape()[2], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    // (Called on every rank, not just master.)
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired. The file is reopened in append mode:
  // by every rank with parallel HDF5, by master only otherwise.
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
361

362
void restart_set_keff()
41✔
363
{
364
  if (simulation::restart_batch > settings::n_inactive) {
41!
365
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
195✔
366
      simulation::k_sum[0] += simulation::k_generation[i];
154✔
367
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
154✔
368
    }
369
    int n = settings::gen_per_batch * simulation::n_realizations;
41✔
370
    simulation::keff = simulation::k_sum[0] / n;
41✔
371
  } else {
372
    simulation::keff = simulation::k_generation.back();
×
373
  }
374
}
41✔
375

376
void load_state_point()
41✔
377
{
378
  write_message(
41✔
379
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
75✔
380
  openmc_statepoint_load(settings::path_statepoint.c_str());
41✔
381
}
41✔
382

383
//! Abort unless the file's "version" attribute matches the statepoint
//! format version this build of OpenMC writes.
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> file_version;
  read_attribute(file_id, "version", file_version);
  // Any mismatch (major or minor) is fatal — older/newer layouts are not
  // interpreted here.
  if (file_version != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}
394

395
extern "C" int openmc_statepoint_load(const char* filename)
41✔
396
{
397
  // Open file for reading
398
  hid_t file_id = file_open(filename, 'r', true);
41✔
399

400
  // Read filetype
401
  std::string word;
41✔
402
  read_attribute(file_id, "filetype", word);
41✔
403
  if (word != "statepoint") {
41!
404
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
405
  }
406

407
  statepoint_version_check(file_id);
41✔
408

409
  // Read and overwrite random number seed
410
  int64_t seed;
411
  read_dataset(file_id, "seed", seed);
41✔
412
  openmc_set_seed(seed);
41✔
413

414
  // Read and overwrite random number stride
415
  uint64_t stride;
416
  read_dataset(file_id, "stride", stride);
41✔
417
  openmc_set_stride(stride);
41✔
418

419
  // It is not impossible for a state point to be generated from a CE run but
420
  // to be loaded in to an MG run (or vice versa), check to prevent that.
421
  read_dataset(file_id, "energy_mode", word);
41✔
422
  if (word == "multi-group" && settings::run_CE) {
41!
423
    fatal_error("State point file is from multigroup run but current run is "
×
424
                "continous energy.");
425
  } else if (word == "continuous-energy" && !settings::run_CE) {
41!
426
    fatal_error("State point file is from continuous-energy run but current "
×
427
                "run is multigroup!");
428
  }
429

430
  // Read and overwrite run information except number of batches
431
  read_dataset(file_id, "run_mode", word);
41✔
432
  if (word == "fixed source") {
41!
433
    settings::run_mode = RunMode::FIXED_SOURCE;
×
434
  } else if (word == "eigenvalue") {
41!
435
    settings::run_mode = RunMode::EIGENVALUE;
41✔
436
  }
437
  read_attribute(file_id, "photon_transport", settings::photon_transport);
41✔
438
  read_dataset(file_id, "n_particles", settings::n_particles);
41✔
439
  int temp;
440
  read_dataset(file_id, "n_batches", temp);
41✔
441

442
  // Take maximum of statepoint n_batches and input n_batches
443
  settings::n_batches = std::max(settings::n_batches, temp);
41✔
444

445
  // Read batch number to restart at
446
  read_dataset(file_id, "current_batch", simulation::restart_batch);
41✔
447

448
  if (settings::restart_run &&
41!
449
      simulation::restart_batch >= settings::n_max_batches) {
41✔
450
    warning(fmt::format(
7✔
451
      "The number of batches specified for simulation ({}) is smaller "
452
      "than or equal to the number of batches in the restart statepoint file "
453
      "({})",
454
      settings::n_max_batches, simulation::restart_batch));
455
  }
456

457
  // Logical flag for source present in statepoint file
458
  bool source_present;
459
  read_attribute(file_id, "source_present", source_present);
41✔
460

461
  // Read information specific to eigenvalue run
462
  if (settings::run_mode == RunMode::EIGENVALUE) {
41!
463
    read_dataset(file_id, "n_inactive", temp);
41✔
464
    read_eigenvalue_hdf5(file_id);
41✔
465

466
    // Take maximum of statepoint n_inactive and input n_inactive
467
    settings::n_inactive = std::max(settings::n_inactive, temp);
41✔
468

469
    // Check to make sure source bank is present
470
    if (settings::path_sourcepoint == settings::path_statepoint &&
82!
471
        !source_present) {
41!
472
      fatal_error("Source bank must be contained in statepoint restart file");
×
473
    }
474
  }
475

476
  // Read number of realizations for global tallies
477
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
41✔
478

479
  // Set k_sum, keff, and current_batch based on whether restart file is part
480
  // of active cycle or inactive cycle
481
  if (settings::run_mode == RunMode::EIGENVALUE) {
41!
482
    restart_set_keff();
41✔
483
  }
484

485
  // Set current batch number
486
  simulation::current_batch = simulation::restart_batch;
41✔
487

488
  // Read tallies to master. If we are using Parallel HDF5, all processes
489
  // need to be included in the HDF5 calls.
490
#ifdef PHDF5
491
  if (true) {
492
#else
493
  if (mpi::master) {
20!
494
#endif
495
    // Read global tally data
496
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
41✔
497
      false, simulation::global_tallies.data());
41✔
498

499
    // Check if tally results are present
500
    bool present;
501
    read_attribute(file_id, "tallies_present", present);
41✔
502

503
    // Read in sum and sum squared
504
    if (present) {
41!
505
      hid_t tallies_group = open_group(file_id, "tallies");
41✔
506

507
      for (auto& tally : model::tallies) {
141✔
508
        // Read sum, sum_sq, and N for each bin
509
        std::string name = "tally " + std::to_string(tally->id_);
100✔
510
        hid_t tally_group = open_group(tallies_group, name.c_str());
100✔
511

512
        int internal = 0;
100✔
513
        if (attribute_exists(tally_group, "internal")) {
100!
514
          read_attribute(tally_group, "internal", internal);
100✔
515
        }
516
        if (internal) {
100!
517
          tally->writable_ = false;
×
518
        } else {
519
          auto& results = tally->results_;
100✔
520
          read_tally_results(tally_group, results.shape()[0],
200✔
521
            results.shape()[1], results.shape()[2], results.data());
100✔
522

523
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
100✔
524
          close_group(tally_group);
100✔
525
        }
526
      }
100✔
527
      close_group(tallies_group);
41✔
528
    }
529
  }
530

531
  // Read source if in eigenvalue mode
532
  if (settings::run_mode == RunMode::EIGENVALUE) {
41!
533

534
    // Check if source was written out separately
535
    if (!source_present) {
41!
536

537
      // Close statepoint file
538
      file_close(file_id);
×
539

540
      // Write message
541
      write_message(
×
542
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
543

544
      // Open source file
545
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
546
    }
547

548
    // Read source
549
    read_source_bank(file_id, simulation::source_bank, true);
41✔
550
  }
551

552
  // Close file
553
  file_close(file_id);
41✔
554

555
  return 0;
41✔
556
}
41✔
557

558
//! Build the HDF5 compound datatype describing a SourceSite bank entry.
//!
//! \param memory  If true, the compound type spans sizeof(SourceSite)
//!                (in-memory layout, including any padding); if false, a
//!                packed size covering exactly the inserted members
//!                (2 Positions + 3 doubles + 3 ints) is used for the
//!                on-disk layout.
//! \return HDF5 datatype id; the caller is responsible for H5Tclose.
hid_t h5banktype(bool memory)
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  auto n = sizeof(SourceSite);
  if (!memory)
    n = 2 * sizeof(struct Position) + 3 * sizeof(double) + 3 * sizeof(int);
  hid_t banktype = H5Tcreate(H5T_COMPOUND, n);
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The position type is no longer needed once inserted into banktype
  H5Tclose(postype);
  return banktype;
}
591

592
void write_source_point(std::string filename, span<SourceSite> source_bank,
793✔
593
  const vector<int64_t>& bank_index, bool use_mcpl)
594
{
595
  std::string ext = use_mcpl ? "mcpl" : "h5";
793✔
596
  write_message("Creating source file {}.{} with {} particles ...", filename,
793✔
597
    ext, source_bank.size(), 5);
793✔
598

599
  // Dispatch to appropriate function based on file type
600
  if (use_mcpl) {
793✔
601
    filename.append(".mcpl");
24✔
602
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
24✔
603
  } else {
604
    filename.append(".h5");
769✔
605
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
769✔
606
  }
607
}
793✔
608

609
//! Write the source bank to an HDF5 source file.
//!
//! \param filename     Output path (should end in .h5; a warning is issued
//!                     otherwise).
//! \param source_bank  Sites to write.
//! \param bank_index   Per-rank offsets into the bank.
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is by all processes since slave
  // processes need to send source bank data to the master.
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // NOTE(review): on non-master ranks without PHDF5, file_id stays
  // uninitialized when passed to write_source_bank — presumably unused there
  // since those ranks only send data to master; confirm.
  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
    write_attribute(file_id, "version", VERSION_STATEPOINT);
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}
645

646
//! Write the source bank dataset ("source_bank") into an open HDF5 group.
//!
//! Uses two compound datatypes: the in-memory layout of SourceSite and the
//! packed on-disk layout (see h5banktype). With MPI enabled, the per-site
//! MPI datatype is also passed so ranks can exchange bank data.
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t membanktype = h5banktype(true);
  hid_t filebanktype = h5banktype(false);

#ifdef OPENMC_MPI
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype, mpi::source_site);
#else
  write_bank_dataset("source_bank", group_id, source_bank, bank_index,
    membanktype, filebanktype);
#endif

  // Release both datatype handles created above
  H5Tclose(membanktype);
  H5Tclose(filebanktype);
}
663

664
// Determine member names of a compound HDF5 datatype
665
std::string dtype_member_names(hid_t dtype_id)
162✔
666
{
667
  int nmembers = H5Tget_nmembers(dtype_id);
162✔
668
  std::string names;
162✔
669
  for (int i = 0; i < nmembers; i++) {
1,428✔
670
    char* name = H5Tget_member_name(dtype_id, i);
1,266✔
671
    names = names.append(name);
1,266✔
672
    H5free_memory(name);
1,266✔
673
    if (i < nmembers - 1)
1,266✔
674
      names += ", ";
1,104✔
675
  }
676
  return names;
162✔
677
}
×
678

679
void read_source_bank(
81✔
680
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
681
{
682
  bool legacy_particle_codes = true;
81✔
683
  if (attribute_exists(group_id, "version")) {
81✔
684
    array<int, 2> version;
685
    read_attribute(group_id, "version", version);
75✔
686
    if (version[0] > VERSION_STATEPOINT[0] ||
150!
687
        (version[0] == VERSION_STATEPOINT[0] && version[1] >= 2)) {
75!
688
      legacy_particle_codes = false;
75✔
689
    }
690
  }
691

692
  hid_t banktype = h5banktype(true);
81✔
693

694
  // Open the dataset
695
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
81✔
696

697
  // Make sure number of members matches
698
  hid_t dtype = H5Dget_type(dset);
81✔
699
  auto file_member_names = dtype_member_names(dtype);
81✔
700
  auto bank_member_names = dtype_member_names(banktype);
81✔
701
  if (file_member_names != bank_member_names) {
81✔
702
    fatal_error(fmt::format(
6✔
703
      "Source site attributes in file do not match what is "
704
      "expected for this version of OpenMC. File attributes = ({}). Expected "
705
      "attributes = ({})",
706
      file_member_names, bank_member_names));
707
  }
708

709
  hid_t dspace = H5Dget_space(dset);
75✔
710
  hsize_t n_sites;
711
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
75✔
712

713
  // Make sure vector is big enough in case where we're reading entire source on
714
  // each process
715
  if (!distribute)
75✔
716
    sites.resize(n_sites);
34✔
717

718
  hid_t memspace;
719
  if (distribute) {
75✔
720
    if (simulation::work_index[mpi::n_procs] > n_sites) {
41!
721
      fatal_error("Number of source sites in source file is less "
×
722
                  "than number of source particles per generation.");
723
    }
724

725
    // Create another data space but for each proc individually
726
    hsize_t n_sites_local = simulation::work_per_rank;
41✔
727
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
41✔
728

729
    // Select hyperslab for each process
730
    hsize_t offset = simulation::work_index[mpi::rank];
41✔
731
    H5Sselect_hyperslab(
41✔
732
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
733
  } else {
734
    memspace = H5S_ALL;
34✔
735
  }
736

737
#ifdef PHDF5
738
  // Read data in parallel
739
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
39✔
740
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
39✔
741
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
39✔
742
  H5Pclose(plist);
39✔
743
#else
744
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
36✔
745
#endif
746

747
  // Close all ids
748
  H5Sclose(dspace);
75✔
749
  if (distribute)
75✔
750
    H5Sclose(memspace);
41✔
751
  H5Dclose(dset);
75✔
752
  H5Tclose(banktype);
75✔
753

754
  if (legacy_particle_codes) {
75!
755
    for (auto& site : sites) {
×
756
      site.particle = legacy_particle_index_to_type(site.particle.pdg_number());
×
757
    }
758
  }
759
}
75✔
760

761
void write_unstructured_mesh_results()
873✔
762
{
763

764
  for (auto& tally : model::tallies) {
4,271✔
765

766
    vector<std::string> tally_scores;
3,398✔
767
    for (auto filter_idx : tally->filters()) {
10,061✔
768
      auto& filter = model::tally_filters[filter_idx];
6,663✔
769
      if (filter->type() != FilterType::MESH)
6,663!
770
        continue;
6,653✔
771

772
      // check if the filter uses an unstructured mesh
773
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
736!
774
      auto mesh_idx = mesh_filter->mesh();
736!
775
      auto umesh =
776
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
736!
777

778
      if (!umesh)
736✔
779
        continue;
726✔
780

781
      if (!umesh->output_)
10!
782
        continue;
×
783

784
      if (umesh->library() == "moab") {
10!
UNCOV
785
        if (mpi::master)
×
UNCOV
786
          warning(fmt::format(
×
787
            "Output for a MOAB mesh (mesh {}) was "
788
            "requested but will not be written. Please use the Python "
789
            "API to generated the desired VTK tetrahedral mesh.",
UNCOV
790
            umesh->id_));
×
UNCOV
791
        continue;
×
792
      }
793

794
      // if this tally has more than one filter, print
795
      // warning and skip writing the mesh
796
      if (tally->filters().size() > 1) {
10!
797
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
798
                            "{}. More than one filter is present on the tally.",
799
          tally->id_));
×
800
        break;
×
801
      }
802

803
      int n_realizations = tally->n_realizations_;
10✔
804

805
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
20✔
806
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
20✔
807
          // combine the score and nuclide into a name for the value
808
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
20!
809
            tally->nuclide_name(nuc_idx));
20!
810
          // add this score to the mesh
811
          // (this is in a separate loop because all variables need to be added
812
          //  to libMesh's equation system before any are initialized, which
813
          //  happens in set_score_data)
814
          umesh->add_score(score_str);
10!
815
        }
10✔
816
      }
817

818
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
20✔
819
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
20✔
820
          // combine the score and nuclide into a name for the value
821
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
20!
822
            tally->nuclide_name(nuc_idx));
20!
823

824
          // index for this nuclide and score
825
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
10✔
826

827
          // construct result vectors
828
          vector<double> mean_vec(umesh->n_bins()),
10!
829
            std_dev_vec(umesh->n_bins());
10!
830
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
97,866✔
831
            // get the volume for this bin
832
            double volume = umesh->volume(j);
97,856!
833
            // compute the mean
834
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
97,856!
835
                          n_realizations;
97,856✔
836
            mean_vec.at(j) = mean / volume;
97,856!
837

838
            // compute the standard deviation
839
            double sum_sq =
840
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
97,856!
841
            double std_dev {0.0};
97,856✔
842
            if (n_realizations > 1) {
97,856!
843
              std_dev = sum_sq / n_realizations - mean * mean;
97,856✔
844
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
97,856✔
845
            }
846
            std_dev_vec[j] = std_dev / volume;
97,856✔
847
          }
848
#ifdef OPENMC_MPI
849
          MPI_Bcast(
10!
850
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10✔
851
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10!
852
            mpi::intracomm);
853
#endif
854
          // set the data for this score
855
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
10!
856
        }
10✔
857
      }
858

859
      // Generate a file name based on the tally id
860
      // and the current batch number
861
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
10!
862
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
10✔
863
        simulation::current_batch, batch_width);
×
864

865
      // Write the unstructured mesh and data to file
866
      umesh->write(filename);
10!
867

868
      // remove score data added for this mesh write
869
      umesh->remove_scores();
10!
870
    }
10✔
871
  }
3,398✔
872
}
873✔
873

874
//! Reduce tally results across MPI ranks and write them to a statepoint file.
//!
//! Used when tallies are not reduced during the simulation ("no-reduce" mode):
//! each rank holds partial sums, so global tallies and each tally's results
//! are MPI_SUM-reduced onto the master, which alone performs the HDF5 writes.
//! Every rank must call this collectively — the master and non-master branches
//! below issue matching MPI_Reduce calls in the same order.
//!
//! \param file_id HDF5 handle of the (already open) statepoint file
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  // Only valid/used on the master rank (opened and closed under mpi::master)
  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer values to value on master, but only at the end of the run (final
  // batch or trigger satisfaction) so intermediate statepoints don't clobber
  // the per-rank partial sums still being accumulated
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    // Flag that at least one tally is being written (set once, on first
    // writable tally)
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values (SUM and SUM_SQ slices only)
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array (MPI_Reduce requires a
    // contiguous buffer; the strided view is not)
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result (zero-filled except for the reduced
      // SUM/SUM_SQ slices, so the on-disk layout matches a full results array)
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(
        tally_group, shape[0], shape[1], shape[2], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors; this reduce pairs
      // with the master's MPI_IN_PLACE reduce above
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    // NOTE(review): the positive flag above is written as an HDF5 *attribute*
    // while this check/write uses a *dataset* of the same name — presumably
    // the statepoint reader accepts both forms; confirm against
    // load_state_point before changing either.
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}
979

980
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc