openmc-dev / openmc, build 13183837570 (06 Feb 2025 04:53PM UTC)
Pull Request #3087: wheel building with scikit build core (merge e28236a86 into 6e0f156d3)
Coverage: 82.603% (-2.3% from 84.867%); 107125 of 129687 relevant lines covered; 12608230.73 hits per line

Source file: /src/state_point.cpp (86.22% of lines covered)
#include "openmc/state_point.h"

#include <algorithm>
#include <cstdint> // for int64_t
#include <string>

#include "xtensor/xbuilder.hpp" // for empty_like
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/eigenvalue.h"
#include "openmc/error.h"
#include "openmc/file_utils.h"
#include "openmc/hdf5_interface.h"
#include "openmc/mcpl_interface.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/derivative.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/filter_mesh.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"
#include "openmc/vector.h"

namespace openmc {

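// Write the current simulation state (run metadata, tally specifications and
// results, runtime metrics, and optionally the source bank) to an HDF5
// statepoint file. Returns 0 on success.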
extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user wants a default name,
  // of the form output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_dataset(file_id, "global_tallies", simulation::global_tallies);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(LIBMESH) || defined(DAGMC)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}

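// Reconstruct the running average of k-effective from the generations
// accumulated before the restart batch so a restarted eigenvalue run resumes
// with the correct value.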
void restart_set_keff()
{
  if (simulation::restart_batch > settings::n_inactive) {
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
      simulation::k_sum[0] += simulation::k_generation[i];
      simulation::k_sum[1] += std::pow(simulation::k_generation[i], 2);
    }
    int n = settings::gen_per_batch * simulation::n_realizations;
    simulation::keff = simulation::k_sum[0] / n;
  } else {
    simulation::keff = simulation::k_generation.back();
  }
}

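// Load the statepoint specified by settings::path_statepoint.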
void load_state_point()
{
  write_message(
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
  openmc_statepoint_load(settings::path_statepoint.c_str());
}

void statepoint_version_check(hid_t file_id)
{
  // Read revision number for state point file and make sure it matches with
  // current version
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);
  if (version_array != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}

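// Restore simulation state (seed, run settings, tally results, and source
// bank) from an existing statepoint file. Returns 0 on success.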
extern "C" int openmc_statepoint_load(const char* filename)
{
  // Open file for reading
  hid_t file_id = file_open(filename, 'r', true);

  // Read filetype
  std::string word;
  read_attribute(file_id, "filetype", word);
  if (word != "statepoint") {
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
  }

  statepoint_version_check(file_id);

  // Read and overwrite random number seed
  int64_t seed;
  read_dataset(file_id, "seed", seed);
  openmc_set_seed(seed);

  // A state point generated from a CE run could be loaded into an MG run (or
  // vice versa), so check to prevent that.
  read_dataset(file_id, "energy_mode", word);
  if (word == "multi-group" && settings::run_CE) {
    fatal_error("State point file is from multigroup run but current run is "
                "continuous energy.");
  } else if (word == "continuous-energy" && !settings::run_CE) {
    fatal_error("State point file is from continuous-energy run but current "
                "run is multigroup!");
  }

  // Read and overwrite run information except number of batches
  read_dataset(file_id, "run_mode", word);
  if (word == "fixed source") {
    settings::run_mode = RunMode::FIXED_SOURCE;
  } else if (word == "eigenvalue") {
    settings::run_mode = RunMode::EIGENVALUE;
  }
  read_attribute(file_id, "photon_transport", settings::photon_transport);
  read_dataset(file_id, "n_particles", settings::n_particles);
  int temp;
  read_dataset(file_id, "n_batches", temp);

  // Take maximum of statepoint n_batches and input n_batches
  settings::n_batches = std::max(settings::n_batches, temp);

  // Read batch number to restart at
  read_dataset(file_id, "current_batch", simulation::restart_batch);

  if (simulation::restart_batch >= settings::n_max_batches) {
    warning(fmt::format(
      "The number of batches specified for simulation ({}) is smaller "
      "than or equal to the number of batches in the restart statepoint file "
      "({})",
      settings::n_max_batches, simulation::restart_batch));
  }

  // Logical flag for source present in statepoint file
  bool source_present;
  read_attribute(file_id, "source_present", source_present);

  // Read information specific to eigenvalue run
  if (settings::run_mode == RunMode::EIGENVALUE) {
    read_dataset(file_id, "n_inactive", temp);
    read_eigenvalue_hdf5(file_id);

    // Take maximum of statepoint n_inactive and input n_inactive
    settings::n_inactive = std::max(settings::n_inactive, temp);

    // Check to make sure source bank is present
    if (settings::path_sourcepoint == settings::path_statepoint &&
        !source_present) {
      fatal_error("Source bank must be contained in statepoint restart file");
    }
  }

  // Read number of realizations for global tallies
  read_dataset(file_id, "n_realizations", simulation::n_realizations);

  // Set k_sum, keff, and current_batch based on whether restart file is part
  // of active cycle or inactive cycle
  if (settings::run_mode == RunMode::EIGENVALUE) {
    restart_set_keff();
  }

  // Set current batch number
  simulation::current_batch = simulation::restart_batch;

  // Read tallies to master. If we are using Parallel HDF5, all processes
  // need to be included in the HDF5 calls.
#ifdef PHDF5
  if (true) {
#else
  if (mpi::master) {
#endif
    // Read global tally data
    read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
      false, simulation::global_tallies.data());

    // Check if tally results are present
    bool present;
    read_attribute(file_id, "tallies_present", present);

    // Read in sum and sum squared
    if (present) {
      hid_t tallies_group = open_group(file_id, "tallies");

      for (auto& tally : model::tallies) {
        // Read sum, sum_sq, and N for each bin
        std::string name = "tally " + std::to_string(tally->id_);
        hid_t tally_group = open_group(tallies_group, name.c_str());

        int internal = 0;
        if (attribute_exists(tally_group, "internal")) {
          read_attribute(tally_group, "internal", internal);
        }
        if (internal) {
          tally->writable_ = false;
        } else {
          auto& results = tally->results_;
          read_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
          close_group(tally_group);
        }
      }
      close_group(tallies_group);
    }
  }

  // Read source if in eigenvalue mode
  if (settings::run_mode == RunMode::EIGENVALUE) {

    // Check if source was written out separately
    if (!source_present) {

      // Close statepoint file
      file_close(file_id);

      // Write message
      write_message(
        "Loading source file " + settings::path_sourcepoint + "...", 5);

      // Open source file
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
    }

    // Read source
    read_source_bank(file_id, simulation::source_bank, true);
  }

  // Close file
  file_close(file_id);

  return 0;
}

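// Build the compound HDF5 datatype corresponding to SourceSite. The caller is
// responsible for closing the returned type with H5Tclose.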
hid_t h5banktype()
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  H5Tclose(postype);
  return banktype;
}

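// Write source sites to a standalone source file, dispatching to the MCPL or
// HDF5 writer based on use_mcpl.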
void write_source_point(std::string filename, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index, bool use_mcpl)
{
  std::string ext = use_mcpl ? "mcpl" : "h5";
  write_message("Creating source file {}.{} with {} particles ...", filename,
    ext, source_bank.size(), 5);

  // Dispatch to appropriate function based on file type
  if (use_mcpl) {
    filename.append(".mcpl");
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
  } else {
    filename.append(".h5");
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
  }
}

void write_h5_source_point(const char* filename,
  gsl::span<SourceSite> source_bank, const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is by all processes since slave
  // processes need to send source bank data to the master).
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}

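// Write the source bank dataset. With parallel HDF5, every rank writes its
// own hyperslab collectively; otherwise the master receives each rank's sites
// over MPI and writes them serially.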
void write_source_bank(hid_t group_id, gsl::span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t banktype = h5banktype();

  // Set total and individual process dataspace sizes for source bank
  int64_t dims_size = bank_index.back();
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];

#ifdef PHDF5
  // Set size of total dataspace for all procs and rank
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
    H5P_DEFAULT, H5P_DEFAULT);

  // Create another data space but for each proc individually
  hsize_t count[] {static_cast<hsize_t>(count_size)};
  hid_t memspace = H5Screate_simple(1, count, nullptr);

  // Select hyperslab for this dataspace
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

  // Set up the property list for parallel writing
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);

  // Write data to file in parallel
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());

  // Free resources
  H5Sclose(dspace);
  H5Sclose(memspace);
  H5Dclose(dset);
  H5Pclose(plist);

#else

  if (mpi::master) {
    // Create dataset big enough to hold all source sites
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Save source bank sites since the array is overwritten below
#ifdef OPENMC_MPI
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
#endif

    for (int i = 0; i < mpi::n_procs; ++i) {
      // Create memory space
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
      hid_t memspace = H5Screate_simple(1, count, nullptr);

#ifdef OPENMC_MPI
      // Receive source sites from other processes
      if (i > 0)
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
          mpi::intracomm, MPI_STATUS_IGNORE);
#endif

      // Select hyperslab for this dataspace
      dspace = H5Dget_space(dset);
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
      H5Sselect_hyperslab(
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

      // Write data to hyperslab
      H5Dwrite(
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());

      H5Sclose(memspace);
      H5Sclose(dspace);
    }

    // Close all ids
    H5Dclose(dset);

#ifdef OPENMC_MPI
    // Restore state of source bank
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
#endif
  } else {
#ifdef OPENMC_MPI
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
      mpi::intracomm);
#endif
  }
#endif

  H5Tclose(banktype);
}

// Determine member names of a compound HDF5 datatype
std::string dtype_member_names(hid_t dtype_id)
{
  int nmembers = H5Tget_nmembers(dtype_id);
  std::string names;
  for (int i = 0; i < nmembers; i++) {
    char* name = H5Tget_member_name(dtype_id, i);
    names = names.append(name);
    H5free_memory(name);
    if (i < nmembers - 1)
      names += ", ";
  }
  return names;
}

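// Read source sites from a "source_bank" dataset after verifying that the
// file's compound datatype matches the current SourceSite layout. When
// distribute is true, each rank reads only its own slice of the bank.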
void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype();

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure vector is big enough in case where we're reading entire source on
  // each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}

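// For each tally with an unstructured mesh filter, attach per-score mean and
// standard deviation results to the mesh and write it to a standalone file.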
void write_unstructured_mesh_results()
{

  for (auto& tally : model::tallies) {

    vector<std::string> tally_scores;
    for (auto filter_idx : tally->filters()) {
      auto& filter = model::tally_filters[filter_idx];
      if (filter->type() != FilterType::MESH)
        continue;

      // check if the filter uses an unstructured mesh
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
      auto mesh_idx = mesh_filter->mesh();
      auto umesh =
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());

      if (!umesh)
        continue;

      if (!umesh->output_)
        continue;

      if (umesh->library() == "moab") {
        if (mpi::master)
          warning(fmt::format(
            "Output for a MOAB mesh (mesh {}) was "
            "requested but will not be written. Please use the Python "
            "API to generate the desired VTK tetrahedral mesh.",
            umesh->id_));
        continue;
      }

      // if this tally has more than one filter, print
      // warning and skip writing the mesh
      if (tally->filters().size() > 1) {
        warning(fmt::format("Skipping unstructured mesh writing for tally "
                            "{}. More than one filter is present on the tally.",
          tally->id_));
        break;
      }

      int n_realizations = tally->n_realizations_;

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));
          // add this score to the mesh
          // (this is in a separate loop because all variables need to be added
          //  to libMesh's equation system before any are initialized, which
          //  happens in set_score_data)
          umesh->add_score(score_str);
        }
      }

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));

          // index for this nuclide and score
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();

          // construct result vectors
          vector<double> mean_vec(umesh->n_bins()),
            std_dev_vec(umesh->n_bins());
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
            // get the volume for this bin
            double volume = umesh->volume(j);
            // compute the mean
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
                          n_realizations;
            mean_vec.at(j) = mean / volume;

            // compute the standard deviation
            double sum_sq =
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
            double std_dev {0.0};
            if (n_realizations > 1) {
              std_dev = sum_sq / n_realizations - mean * mean;
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
            }
            std_dev_vec[j] = std_dev / volume;
          }
#ifdef OPENMC_MPI
          MPI_Bcast(
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
            mpi::intracomm);
#endif
          // set the data for this score
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
        }
      }

      // Generate a file name based on the tally id
      // and the current batch number
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
        simulation::current_batch, batch_width);

      // Write the unstructured mesh and data to file
      umesh->write(filename);

      // remove score data added for this mesh write
      umesh->remove_scores();
    }
  }
}

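// Used with the no-tally-reduction method: tally results are reduced across
// MPI ranks here, at statepoint time, and written from the master process.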
void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer values to value on master
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_dataset(file_id, "global_tallies", gt);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());

      close_group(tally_group);
    } else {
      // Receive buffer not significant at other processors
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

} // namespace openmc