• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 18537555145

15 Oct 2025 05:37PM UTC coverage: 81.983% (-3.2%) from 85.194%
18537555145

Pull #3417

github

web-flow
Merge 3615a1fcc into e9077b137
Pull Request #3417: Addition of a collision tracking feature

16802 of 23354 branches covered (71.94%)

Branch coverage included in aggregate %.

480 of 522 new or added lines in 13 files covered. (91.95%)

483 existing lines in 53 files now uncovered.

54134 of 63171 relevant lines covered (85.69%)

43199115.04 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

75.48
/src/state_point.cpp
1
#include "openmc/state_point.h"
2

3
#include <algorithm>
4
#include <cstdint> // for int64_t
5
#include <string>
6

7
#include "xtensor/xbuilder.hpp" // for empty_like
8
#include "xtensor/xview.hpp"
9
#include <fmt/core.h>
10

11
#include "openmc/bank.h"
12
#include "openmc/bank_io.h"
13
#include "openmc/capi.h"
14
#include "openmc/constants.h"
15
#include "openmc/eigenvalue.h"
16
#include "openmc/error.h"
17
#include "openmc/file_utils.h"
18
#include "openmc/hdf5_interface.h"
19
#include "openmc/mcpl_interface.h"
20
#include "openmc/mesh.h"
21
#include "openmc/message_passing.h"
22
#include "openmc/mgxs_interface.h"
23
#include "openmc/nuclide.h"
24
#include "openmc/output.h"
25
#include "openmc/settings.h"
26
#include "openmc/simulation.h"
27
#include "openmc/tallies/derivative.h"
28
#include "openmc/tallies/filter.h"
29
#include "openmc/tallies/filter_mesh.h"
30
#include "openmc/tallies/tally.h"
31
#include "openmc/timer.h"
32
#include "openmc/vector.h"
33

34
namespace openmc {
35

36
//! Write a statepoint (HDF5) file capturing the current simulation state.
//!
//! \param filename      Desired output path, or nullptr to generate a default
//!                      name of the form "<path_output>statepoint.<batch>.h5"
//!                      (batch number zero-padded to the width of n_max_batches).
//! \param write_source  If non-null, controls whether the source bank is
//!                      written; nullptr means "write it".
//! \return 0 (errors are reported through warning()/fatal_error())
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  // Time spent here is charged to the "writing statepoints" runtime metric
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user
  // wants a default name for this, of the form like output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  // NOTE(review): file_id is only assigned on ranks that open the file
  // (master here; master-or-all later under parallel HDF5). Non-master ranks
  // pass it unassigned to write_source_bank below, which presumably ignores
  // it on those ranks -- confirm against bank_io.
  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      // Other run modes (e.g. plotting/volume) write no run_mode dataset
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        // Internal (non-writable) tallies only record the "internal" marker;
        // everything else about them is skipped.
        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores; index -1 is the "total" bin
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

  // With parallel HDF5 every rank participates in file I/O; otherwise only
  // the master touches the file.
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired. The file is re-opened in append mode
  // because the master closed it above.
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
353

354
void restart_set_keff()
69✔
355
{
356
  if (simulation::restart_batch > settings::n_inactive) {
69!
357
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
327✔
358
      simulation::k_sum[0] += simulation::k_generation[i];
258✔
359
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
258✔
360
    }
361
    int n = settings::gen_per_batch * simulation::n_realizations;
69✔
362
    simulation::keff = simulation::k_sum[0] / n;
69✔
363
  } else {
364
    simulation::keff = simulation::k_generation.back();
×
365
  }
366
}
69✔
367

368
void load_state_point()
69✔
369
{
370
  write_message(
69✔
371
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
126✔
372
  openmc_statepoint_load(settings::path_statepoint.c_str());
69✔
373
}
69✔
374

375
// Abort with a fatal error unless the statepoint file's "version" attribute
// matches the revision this build of OpenMC writes (VERSION_STATEPOINT).
void statepoint_version_check(hid_t file_id)
{
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);

  if (version_array == VERSION_STATEPOINT)
    return;

  fatal_error("State point version does not match current version in OpenMC.");
}
386

387
extern "C" int openmc_statepoint_load(const char* filename)
69✔
388
{
389
  // Open file for reading
390
  hid_t file_id = file_open(filename, 'r', true);
69✔
391

392
  // Read filetype
393
  std::string word;
69✔
394
  read_attribute(file_id, "filetype", word);
69✔
395
  if (word != "statepoint") {
69!
396
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
×
397
  }
398

399
  statepoint_version_check(file_id);
69✔
400

401
  // Read and overwrite random number seed
402
  int64_t seed;
403
  read_dataset(file_id, "seed", seed);
69✔
404
  openmc_set_seed(seed);
69✔
405

406
  // Read and overwrite random number stride
407
  uint64_t stride;
408
  read_dataset(file_id, "stride", stride);
69✔
409
  openmc_set_stride(stride);
69✔
410

411
  // It is not impossible for a state point to be generated from a CE run but
412
  // to be loaded in to an MG run (or vice versa), check to prevent that.
413
  read_dataset(file_id, "energy_mode", word);
69✔
414
  if (word == "multi-group" && settings::run_CE) {
69!
415
    fatal_error("State point file is from multigroup run but current run is "
×
416
                "continous energy.");
417
  } else if (word == "continuous-energy" && !settings::run_CE) {
69!
418
    fatal_error("State point file is from continuous-energy run but current "
×
419
                "run is multigroup!");
420
  }
421

422
  // Read and overwrite run information except number of batches
423
  read_dataset(file_id, "run_mode", word);
69✔
424
  if (word == "fixed source") {
69!
425
    settings::run_mode = RunMode::FIXED_SOURCE;
×
426
  } else if (word == "eigenvalue") {
69!
427
    settings::run_mode = RunMode::EIGENVALUE;
69✔
428
  }
429
  read_attribute(file_id, "photon_transport", settings::photon_transport);
69✔
430
  read_dataset(file_id, "n_particles", settings::n_particles);
69✔
431
  int temp;
432
  read_dataset(file_id, "n_batches", temp);
69✔
433

434
  // Take maximum of statepoint n_batches and input n_batches
435
  settings::n_batches = std::max(settings::n_batches, temp);
69✔
436

437
  // Read batch number to restart at
438
  read_dataset(file_id, "current_batch", simulation::restart_batch);
69✔
439

440
  if (settings::restart_run &&
69!
441
      simulation::restart_batch >= settings::n_max_batches) {
69✔
442
    warning(fmt::format(
11✔
443
      "The number of batches specified for simulation ({}) is smaller "
444
      "than or equal to the number of batches in the restart statepoint file "
445
      "({})",
446
      settings::n_max_batches, simulation::restart_batch));
447
  }
448

449
  // Logical flag for source present in statepoint file
450
  bool source_present;
451
  read_attribute(file_id, "source_present", source_present);
69✔
452

453
  // Read information specific to eigenvalue run
454
  if (settings::run_mode == RunMode::EIGENVALUE) {
69!
455
    read_dataset(file_id, "n_inactive", temp);
69✔
456
    read_eigenvalue_hdf5(file_id);
69✔
457

458
    // Take maximum of statepoint n_inactive and input n_inactive
459
    settings::n_inactive = std::max(settings::n_inactive, temp);
69✔
460

461
    // Check to make sure source bank is present
462
    if (settings::path_sourcepoint == settings::path_statepoint &&
138!
463
        !source_present) {
69!
464
      fatal_error("Source bank must be contained in statepoint restart file");
×
465
    }
466
  }
467

468
  // Read number of realizations for global tallies
469
  read_dataset(file_id, "n_realizations", simulation::n_realizations);
69✔
470

471
  // Set k_sum, keff, and current_batch based on whether restart file is part
472
  // of active cycle or inactive cycle
473
  if (settings::run_mode == RunMode::EIGENVALUE) {
69!
474
    restart_set_keff();
69✔
475
  }
476

477
  // Set current batch number
478
  simulation::current_batch = simulation::restart_batch;
69✔
479

480
  // Read tallies to master. If we are using Parallel HDF5, all processes
481
  // need to be included in the HDF5 calls.
482
#ifdef PHDF5
483
  if (true) {
484
#else
485
  if (mpi::master) {
30!
486
#endif
487
    // Read global tally data
488
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
69✔
489
      false, simulation::global_tallies.data());
69✔
490

491
    // Check if tally results are present
492
    bool present;
493
    read_attribute(file_id, "tallies_present", present);
69✔
494

495
    // Read in sum and sum squared
496
    if (present) {
69!
497
      hid_t tallies_group = open_group(file_id, "tallies");
69✔
498

499
      for (auto& tally : model::tallies) {
243✔
500
        // Read sum, sum_sq, and N for each bin
501
        std::string name = "tally " + std::to_string(tally->id_);
174✔
502
        hid_t tally_group = open_group(tallies_group, name.c_str());
174✔
503

504
        int internal = 0;
174✔
505
        if (attribute_exists(tally_group, "internal")) {
174!
506
          read_attribute(tally_group, "internal", internal);
174✔
507
        }
508
        if (internal) {
174!
509
          tally->writable_ = false;
×
510
        } else {
511
          auto& results = tally->results_;
174✔
512
          read_tally_results(tally_group, results.shape()[0],
348✔
513
            results.shape()[1], results.data());
174✔
514
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
174✔
515
          close_group(tally_group);
174✔
516
        }
517
      }
174✔
518
      close_group(tallies_group);
69✔
519
    }
520
  }
521

522
  // Read source if in eigenvalue mode
523
  if (settings::run_mode == RunMode::EIGENVALUE) {
69!
524

525
    // Check if source was written out separately
526
    if (!source_present) {
69!
527

528
      // Close statepoint file
529
      file_close(file_id);
×
530

531
      // Write message
532
      write_message(
×
533
        "Loading source file " + settings::path_sourcepoint + "...", 5);
×
534

535
      // Open source file
536
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
×
537
    }
538

539
    // Read source
540
    read_source_bank(file_id, simulation::source_bank, true);
69✔
541
  }
542

543
  // Close file
544
  file_close(file_id);
69✔
545

546
  return 0;
69✔
547
}
69✔
548

549
// Build the HDF5 compound datatype describing one SourceSite record.
// Caller owns the returned type and must release it with H5Tclose.
hid_t h5banktype()
{
  // Compound type mirroring struct Position (x, y, z doubles)
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));

  // Small helper so each member registration reads as one line; insertion
  // order below is kept identical to the file format's member order.
  auto insert = [banktype](const char* name, size_t offset, hid_t type) {
    H5Tinsert(banktype, name, offset, type);
  };
  insert("r", HOFFSET(SourceSite, r), postype);
  insert("u", HOFFSET(SourceSite, u), postype);
  insert("E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  insert("time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  insert("wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  insert("delayed_group", HOFFSET(SourceSite, delayed_group), H5T_NATIVE_INT);
  insert("surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  insert("particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  // The bank type holds its own copy of the position layout
  H5Tclose(postype);
  return banktype;
}
579

580
void write_source_point(std::string filename, span<SourceSite> source_bank,
1,286✔
581
  const vector<int64_t>& bank_index, bool use_mcpl)
582
{
583
  std::string ext = use_mcpl ? "mcpl" : "h5";
1,286✔
584
  write_message("Creating source file {}.{} with {} particles ...", filename,
1,286✔
585
    ext, source_bank.size(), 5);
1,286✔
586

587
  // Dispatch to appropriate function based on file type
588
  if (use_mcpl) {
1,286✔
589
    filename.append(".mcpl");
38✔
590
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
38✔
591
  } else {
592
    filename.append(".h5");
1,248✔
593
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
1,248✔
594
  }
595
}
1,286✔
596

597
void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
1,248✔
598
  const vector<int64_t>& bank_index)
599
{
600
  // When using parallel HDF5, the file is written to collectively by all
601
  // processes. With MPI-only, the file is opened and written by the master
602
  // (note that the call to write_source_bank is by all processes since slave
603
  // processes need to send source bank data to the master.
604
#ifdef PHDF5
605
  bool parallel = true;
667✔
606
#else
607
  bool parallel = false;
581✔
608
#endif
609

610
  if (!filename)
1,248!
611
    fatal_error("write_source_point filename needs a nonempty name.");
×
612

613
  std::string filename_(filename);
1,248✔
614
  const auto extension = get_file_extension(filename_);
1,248✔
615
  if (extension != "h5") {
1,248!
616
    warning("write_source_point was passed a file extension differing "
×
617
            "from .h5, but an hdf5 file will be written.");
618
  }
619

620
  hid_t file_id;
621
  if (mpi::master || parallel) {
1,248!
622
    file_id = file_open(filename_.c_str(), 'w', true);
1,248✔
623
    write_attribute(file_id, "filetype", "source");
1,248✔
624
  }
625

626
  // Get pointer to source bank and write to file
627
  write_source_bank(file_id, source_bank, bank_index);
1,248✔
628

629
  if (mpi::master || parallel)
1,248!
630
    file_close(file_id);
1,248✔
631
}
1,248✔
632

633
// Write the "source_bank" dataset of SourceSite records into an open HDF5
// file/group.
//
// group_id:    open HDF5 id (only meaningful on ranks that opened the file --
//              see callers)
// source_bank: this rank's source sites
// bank_index:  cumulative per-rank site counts used to place each rank's
//              slice in the global dataset
void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // Compound datatype describing one SourceSite record; released below
  hid_t banktype = h5banktype();

#ifdef OPENMC_MPI
  // MPI build: also pass the MPI datatype for SourceSite so ranks can
  // exchange bank data (presumably used when HDF5 is not parallel -- see
  // bank_io for the exact protocol)
  write_bank_dataset(
    "source_bank", group_id, source_bank, bank_index, banktype, mpi::source_site);
#else
  write_bank_dataset("source_bank", group_id, source_bank, bank_index, banktype);
#endif

  H5Tclose(banktype);
}
647

648
// Determine member names of a compound HDF5 datatype
649
std::string dtype_member_names(hid_t dtype_id)
268✔
650
{
651
  int nmembers = H5Tget_nmembers(dtype_id);
268✔
652
  std::string names;
268✔
653
  for (int i = 0; i < nmembers; i++) {
2,367✔
654
    char* name = H5Tget_member_name(dtype_id, i);
2,099✔
655
    names = names.append(name);
2,099✔
656
    H5free_memory(name);
2,099✔
657
    if (i < nmembers - 1)
2,099✔
658
      names += ", ";
1,831✔
659
  }
660
  return names;
268✔
UNCOV
661
}
×
662

663
void read_source_bank(
134✔
664
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
665
{
666
  hid_t banktype = h5banktype();
134✔
667

668
  // Open the dataset
669
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);
134✔
670

671
  // Make sure number of members matches
672
  hid_t dtype = H5Dget_type(dset);
134✔
673
  auto file_member_names = dtype_member_names(dtype);
134✔
674
  auto bank_member_names = dtype_member_names(banktype);
134✔
675
  if (file_member_names != bank_member_names) {
134✔
676
    fatal_error(fmt::format(
9✔
677
      "Source site attributes in file do not match what is "
678
      "expected for this version of OpenMC. File attributes = ({}). Expected "
679
      "attributes = ({})",
680
      file_member_names, bank_member_names));
681
  }
682

683
  hid_t dspace = H5Dget_space(dset);
125✔
684
  hsize_t n_sites;
685
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);
125✔
686

687
  // Make sure vector is big enough in case where we're reading entire source on
688
  // each process
689
  if (!distribute)
125✔
690
    sites.resize(n_sites);
56✔
691

692
  hid_t memspace;
693
  if (distribute) {
125✔
694
    if (simulation::work_index[mpi::n_procs] > n_sites) {
69!
UNCOV
695
      fatal_error("Number of source sites in source file is less "
×
696
                  "than number of source particles per generation.");
697
    }
698

699
    // Create another data space but for each proc individually
700
    hsize_t n_sites_local = simulation::work_per_rank;
69✔
701
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);
69✔
702

703
    // Select hyperslab for each process
704
    hsize_t offset = simulation::work_index[mpi::rank];
69✔
705
    H5Sselect_hyperslab(
69✔
706
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
707
  } else {
708
    memspace = H5S_ALL;
56✔
709
  }
710

711
#ifdef PHDF5
712
  // Read data in parallel
713
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
71✔
714
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
71✔
715
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
71✔
716
  H5Pclose(plist);
71✔
717
#else
718
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
54✔
719
#endif
720

721
  // Close all ids
722
  H5Sclose(dspace);
125✔
723
  if (distribute)
125✔
724
    H5Sclose(memspace);
69✔
725
  H5Dclose(dset);
125✔
726
  H5Tclose(banktype);
125✔
727
}
125✔
728

729
void write_unstructured_mesh_results()
2,570✔
730
{
731

732
  for (auto& tally : model::tallies) {
11,766✔
733

734
    vector<std::string> tally_scores;
9,196✔
735
    for (auto filter_idx : tally->filters()) {
26,064✔
736
      auto& filter = model::tally_filters[filter_idx];
16,868✔
737
      if (filter->type() != FilterType::MESH)
16,868!
738
        continue;
16,853✔
739

740
      // check if the filter uses an unstructured mesh
741
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
1,918!
742
      auto mesh_idx = mesh_filter->mesh();
1,918!
743
      auto umesh =
744
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());
1,918!
745

746
      if (!umesh)
1,918✔
747
        continue;
1,884✔
748

749
      if (!umesh->output_)
34!
UNCOV
750
        continue;
×
751

752
      if (umesh->library() == "moab") {
34!
753
        if (mpi::master)
19✔
754
          warning(fmt::format(
9!
755
            "Output for a MOAB mesh (mesh {}) was "
756
            "requested but will not be written. Please use the Python "
757
            "API to generated the desired VTK tetrahedral mesh.",
758
            umesh->id_));
9✔
759
        continue;
19✔
760
      }
761

762
      // if this tally has more than one filter, print
763
      // warning and skip writing the mesh
764
      if (tally->filters().size() > 1) {
15!
UNCOV
765
        warning(fmt::format("Skipping unstructured mesh writing for tally "
×
766
                            "{}. More than one filter is present on the tally.",
UNCOV
767
          tally->id_));
×
768
        break;
×
769
      }
770

771
      int n_realizations = tally->n_realizations_;
15✔
772

773
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
774
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
775
          // combine the score and nuclide into a name for the value
776
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
777
            tally->nuclide_name(nuc_idx));
30!
778
          // add this score to the mesh
779
          // (this is in a separate loop because all variables need to be added
780
          //  to libMesh's equation system before any are initialized, which
781
          //  happens in set_score_data)
782
          umesh->add_score(score_str);
15!
783
        }
15✔
784
      }
785

786
      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
30✔
787
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
30✔
788
          // combine the score and nuclide into a name for the value
789
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
30!
790
            tally->nuclide_name(nuc_idx));
30!
791

792
          // index for this nuclide and score
793
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();
15✔
794

795
          // construct result vectors
796
          vector<double> mean_vec(umesh->n_bins()),
15!
797
            std_dev_vec(umesh->n_bins());
15!
798
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
146,799✔
799
            // get the volume for this bin
800
            double volume = umesh->volume(j);
146,784!
801
            // compute the mean
802
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
146,784!
803
                          n_realizations;
146,784✔
804
            mean_vec.at(j) = mean / volume;
146,784!
805

806
            // compute the standard deviation
807
            double sum_sq =
808
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
146,784!
809
            double std_dev {0.0};
146,784✔
810
            if (n_realizations > 1) {
146,784!
811
              std_dev = sum_sq / n_realizations - mean * mean;
146,784✔
812
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
146,784✔
813
            }
814
            std_dev_vec[j] = std_dev / volume;
146,784✔
815
          }
816
#ifdef OPENMC_MPI
817
          MPI_Bcast(
10!
818
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
10✔
819
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
10!
820
            mpi::intracomm);
821
#endif
822
          // set the data for this score
823
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
15!
824
        }
15✔
825
      }
826

827
      // Generate a file name based on the tally id
828
      // and the current batch number
829
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
15!
830
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
15✔
UNCOV
831
        simulation::current_batch, batch_width);
×
832

833
      // Write the unstructured mesh and data to file
834
      umesh->write(filename);
15!
835

836
      // remove score data added for this mesh write
837
      umesh->remove_scores();
15!
838
    }
15✔
839
  }
9,196✔
840
}
2,570✔
841

UNCOV
842
void write_tally_results_nr(hid_t file_id)
×
843
{
844
  // ==========================================================================
845
  // COLLECT AND WRITE GLOBAL TALLIES
846

847
  hid_t tallies_group;
UNCOV
848
  if (mpi::master) {
×
849
    // Write number of realizations
UNCOV
850
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
×
851

UNCOV
852
    tallies_group = open_group(file_id, "tallies");
×
853
  }
854

855
  // Get global tallies
UNCOV
856
  auto& gt = simulation::global_tallies;
×
857

858
#ifdef OPENMC_MPI
859
  // Reduce global tallies
860
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
×
861
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
×
862
    mpi::intracomm);
863

864
  // Transfer values to value on master
865
  if (mpi::master) {
×
866
    if (simulation::current_batch == settings::n_max_batches ||
×
867
        simulation::satisfy_triggers) {
868
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
×
869
    }
870
  }
871
#endif
872

873
  // Write out global tallies sum and sum_sq
UNCOV
874
  if (mpi::master) {
×
875
    write_dataset(file_id, "global_tallies", gt);
×
876
  }
877

UNCOV
878
  for (const auto& t : model::tallies) {
×
879
    // Skip any tallies that are not active
UNCOV
880
    if (!t->active_)
×
881
      continue;
×
882
    if (!t->writable_)
×
883
      continue;
×
884

UNCOV
885
    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
×
886
      write_attribute(file_id, "tallies_present", 1);
×
887
    }
888

889
    // Get view of accumulated tally values
UNCOV
890
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
×
891
      xt::range(static_cast<int>(TallyResult::SUM),
×
892
        static_cast<int>(TallyResult::SUM_SQ) + 1));
×
893

894
    // Make copy of tally values in contiguous array
UNCOV
895
    xt::xtensor<double, 3> values = values_view;
×
896

UNCOV
897
    if (mpi::master) {
×
898
      // Open group for tally
UNCOV
899
      std::string groupname {"tally " + std::to_string(t->id_)};
×
900
      hid_t tally_group = open_group(tallies_group, groupname.c_str());
×
901

902
      // The MPI_IN_PLACE specifier allows the master to copy values into
903
      // a receive buffer without having a temporary variable
904
#ifdef OPENMC_MPI
905
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
×
906
        MPI_SUM, 0, mpi::intracomm);
907
#endif
908

909
      // At the end of the simulation, store the results back in the
910
      // regular TallyResults array
UNCOV
911
      if (simulation::current_batch == settings::n_max_batches ||
×
912
          simulation::satisfy_triggers) {
UNCOV
913
        values_view = values;
×
914
      }
915

916
      // Put in temporary tally result
UNCOV
917
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
×
918
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
×
919
        xt::range(static_cast<int>(TallyResult::SUM),
×
920
          static_cast<int>(TallyResult::SUM_SQ) + 1));
×
UNCOV
921
      copy_view = values;
×
922

923
      // Write reduced tally results to file
UNCOV
924
      auto shape = results_copy.shape();
×
925
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());
×
926

UNCOV
927
      close_group(tally_group);
×
928
    } else {
×
929
      // Receive buffer not significant at other processors
930
#ifdef OPENMC_MPI
931
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
×
932
        mpi::intracomm);
933
#endif
934
    }
UNCOV
935
  }
×
936

UNCOV
937
  if (mpi::master) {
×
938
    if (!object_exists(file_id, "tallies_present")) {
×
939
      // Indicate that tallies are off
UNCOV
940
      write_dataset(file_id, "tallies_present", 0);
×
941
    }
942

UNCOV
943
    close_group(tallies_group);
×
944
  }
UNCOV
945
}
×
946

947
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc