openmc-dev / openmc, build 17868066028
19 Sep 2025 07:35PM UTC. Coverage: 85.215% (+0.02%) from 85.197%

Pull Request #3495: Calculation of kinetic parameters relevant to fissile system with an external neutron source
Merge c60289e62 into ecb0a3361 (github, via web-flow)

90 of 96 new or added lines in 8 files covered (93.75%)
48 existing lines in 2 files now uncovered
53239 of 62476 relevant lines covered (85.22%)
39108913.18 hits per line

Source file: /src/state_point.cpp (87.8% of lines covered)

#include "openmc/state_point.h"

#include <algorithm>
#include <cstdint> // for int64_t
#include <string>

#include "xtensor/xbuilder.hpp" // for empty_like
#include "xtensor/xview.hpp"
#include <fmt/core.h>

#include "openmc/bank.h"
#include "openmc/capi.h"
#include "openmc/constants.h"
#include "openmc/eigenvalue.h"
#include "openmc/error.h"
#include "openmc/file_utils.h"
#include "openmc/hdf5_interface.h"
#include "openmc/mcpl_interface.h"
#include "openmc/mesh.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/nuclide.h"
#include "openmc/output.h"
#include "openmc/settings.h"
#include "openmc/simulation.h"
#include "openmc/tallies/derivative.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/filter_mesh.h"
#include "openmc/tallies/tally.h"
#include "openmc/timer.h"
#include "openmc/vector.h"

namespace openmc {

extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
{
  simulation::time_statepoint.start();

  // If a nullptr is passed in, we assume that the user wants a default name,
  // of the form output/statepoint.20.h5
  std::string filename_;
  if (filename) {
    filename_ = filename;
  } else {
    // Determine width for zero padding
    int w = std::to_string(settings::n_max_batches).size();

    // Set filename for state point
    filename_ = fmt::format("{0}statepoint.{1:0{2}}.h5", settings::path_output,
      simulation::current_batch, w);
  }

  // If a file name was specified, ensure it has .h5 file extension
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("openmc_statepoint_write was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  // Determine whether or not to write the source bank
  bool write_source_ = write_source ? *write_source : true;

  // Write message
  write_message("Creating state point " + filename_ + "...", 5);

  hid_t file_id;
  if (mpi::master) {
    // Create statepoint file
    file_id = file_open(filename_, 'w');

    // Write file type
    write_attribute(file_id, "filetype", "statepoint");

    // Write revision number for state point file
    write_attribute(file_id, "version", VERSION_STATEPOINT);

    // Write OpenMC version
    write_attribute(file_id, "openmc_version", VERSION);
#ifdef GIT_SHA1
    write_attribute(file_id, "git_sha1", GIT_SHA1);
#endif

    // Write current date and time
    write_attribute(file_id, "date_and_time", time_stamp());

    // Write path to input
    write_attribute(file_id, "path", settings::path_input);

    // Write out random number seed
    write_dataset(file_id, "seed", openmc_get_seed());

    // Write out random number stride
    write_dataset(file_id, "stride", openmc_get_stride());

    // Write run information
    write_dataset(file_id, "energy_mode",
      settings::run_CE ? "continuous-energy" : "multi-group");
    switch (settings::run_mode) {
    case RunMode::FIXED_SOURCE:
      write_dataset(file_id, "run_mode", "fixed source");
      break;
    case RunMode::EIGENVALUE:
      write_dataset(file_id, "run_mode", "eigenvalue");
      break;
    default:
      break;
    }
    write_attribute(file_id, "photon_transport", settings::photon_transport);
    write_dataset(file_id, "n_particles", settings::n_particles);
    write_dataset(file_id, "n_batches", settings::n_batches);

    // Write out current batch number
    write_dataset(file_id, "current_batch", simulation::current_batch);

    // Indicate whether source bank is stored in statepoint
    write_attribute(file_id, "source_present", write_source_);

    // Write out information for eigenvalue run
    if (settings::run_mode == RunMode::EIGENVALUE)
      write_eigenvalue_hdf5(file_id);

    hid_t tallies_group = create_group(file_id, "tallies");

    // Write meshes
    meshes_to_hdf5(tallies_group);

    // Write information for derivatives
    if (!model::tally_derivs.empty()) {
      hid_t derivs_group = create_group(tallies_group, "derivatives");
      for (const auto& deriv : model::tally_derivs) {
        hid_t deriv_group =
          create_group(derivs_group, "derivative " + std::to_string(deriv.id));
        write_dataset(deriv_group, "material", deriv.diff_material);
        if (deriv.variable == DerivativeVariable::DENSITY) {
          write_dataset(deriv_group, "independent variable", "density");
        } else if (deriv.variable == DerivativeVariable::NUCLIDE_DENSITY) {
          write_dataset(deriv_group, "independent variable", "nuclide_density");
          write_dataset(
            deriv_group, "nuclide", data::nuclides[deriv.diff_nuclide]->name_);
        } else if (deriv.variable == DerivativeVariable::TEMPERATURE) {
          write_dataset(deriv_group, "independent variable", "temperature");
        } else {
          fatal_error("Independent variable for derivative " +
                      std::to_string(deriv.id) +
                      " not defined in state_point.cpp");
        }
        close_group(deriv_group);
      }
      close_group(derivs_group);
    }

    // Write information for filters
    hid_t filters_group = create_group(tallies_group, "filters");
    write_attribute(filters_group, "n_filters", model::tally_filters.size());
    if (!model::tally_filters.empty()) {
      // Write filter IDs
      vector<int32_t> filter_ids;
      filter_ids.reserve(model::tally_filters.size());
      for (const auto& filt : model::tally_filters)
        filter_ids.push_back(filt->id());
      write_attribute(filters_group, "ids", filter_ids);

      // Write info for each filter
      for (const auto& filt : model::tally_filters) {
        hid_t filter_group =
          create_group(filters_group, "filter " + std::to_string(filt->id()));
        filt->to_statepoint(filter_group);
        close_group(filter_group);
      }
    }
    close_group(filters_group);

    // Write information for tallies
    write_attribute(tallies_group, "n_tallies", model::tallies.size());
    if (!model::tallies.empty()) {
      // Write tally IDs
      vector<int32_t> tally_ids;
      tally_ids.reserve(model::tallies.size());
      for (const auto& tally : model::tallies)
        tally_ids.push_back(tally->id_);
      write_attribute(tallies_group, "ids", tally_ids);

      // Write all tally information except results
      for (const auto& tally : model::tallies) {
        hid_t tally_group =
          create_group(tallies_group, "tally " + std::to_string(tally->id_));

        write_dataset(tally_group, "name", tally->name_);

        if (tally->writable_) {
          write_attribute(tally_group, "internal", 0);
        } else {
          write_attribute(tally_group, "internal", 1);
          close_group(tally_group);
          continue;
        }

        if (tally->multiply_density()) {
          write_attribute(tally_group, "multiply_density", 1);
        } else {
          write_attribute(tally_group, "multiply_density", 0);
        }

        if (tally->estimator_ == TallyEstimator::ANALOG) {
          write_dataset(tally_group, "estimator", "analog");
        } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
          write_dataset(tally_group, "estimator", "tracklength");
        } else if (tally->estimator_ == TallyEstimator::COLLISION) {
          write_dataset(tally_group, "estimator", "collision");
        }

        write_dataset(tally_group, "n_realizations", tally->n_realizations_);

        // Write the ID of each filter attached to this tally
        write_dataset(tally_group, "n_filters", tally->filters().size());
        if (!tally->filters().empty()) {
          vector<int32_t> filter_ids;
          filter_ids.reserve(tally->filters().size());
          for (auto i_filt : tally->filters())
            filter_ids.push_back(model::tally_filters[i_filt]->id());
          write_dataset(tally_group, "filters", filter_ids);
        }

        // Write the nuclides this tally scores
        vector<std::string> nuclides;
        for (auto i_nuclide : tally->nuclides_) {
          if (i_nuclide == -1) {
            nuclides.push_back("total");
          } else {
            if (settings::run_CE) {
              nuclides.push_back(data::nuclides[i_nuclide]->name_);
            } else {
              nuclides.push_back(data::mg.nuclides_[i_nuclide].name);
            }
          }
        }
        write_dataset(tally_group, "nuclides", nuclides);

        if (tally->deriv_ != C_NONE)
          write_dataset(
            tally_group, "derivative", model::tally_derivs[tally->deriv_].id);

        // Write the tally score bins
        vector<std::string> scores;
        for (auto sc : tally->scores_)
          scores.push_back(reaction_name(sc));
        write_dataset(tally_group, "n_score_bins", scores.size());
        write_dataset(tally_group, "score_bins", scores);

        close_group(tally_group);
      }
    }

    if (settings::reduce_tallies) {
      // Write global tallies
      write_global_tallies(file_id);

      // Write tallies
      if (model::active_tallies.size() > 0) {
        // Indicate that tallies are on
        write_attribute(file_id, "tallies_present", 1);

        // Write all tally results
        for (const auto& tally : model::tallies) {
          if (!tally->writable_)
            continue;
          // Write sum and sum_sq for each bin
          std::string name = "tally " + std::to_string(tally->id_);
          hid_t tally_group = open_group(tallies_group, name.c_str());
          auto& results = tally->results_;
          write_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          close_group(tally_group);
        }
      } else {
        // Indicate tallies are off
        write_attribute(file_id, "tallies_present", 0);
      }
    }

    close_group(tallies_group);
  }

  // Check for the no-tally-reduction method
  if (!settings::reduce_tallies) {
    // If using the no-tally-reduction method, we need to collect tally
    // results before writing them to the state point file.
    write_tally_results_nr(file_id);

  } else if (mpi::master) {
    // Write number of global realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);
  }

  if (mpi::master) {
    // Write out the runtime metrics.
    using namespace simulation;
    hid_t runtime_group = create_group(file_id, "runtime");
    write_dataset(
      runtime_group, "total initialization", time_initialize.elapsed());
    write_dataset(
      runtime_group, "reading cross sections", time_read_xs.elapsed());
    write_dataset(runtime_group, "simulation",
      time_inactive.elapsed() + time_active.elapsed());
    write_dataset(runtime_group, "transport", time_transport.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(runtime_group, "inactive batches", time_inactive.elapsed());
    }
    write_dataset(runtime_group, "active batches", time_active.elapsed());
    if (settings::run_mode == RunMode::EIGENVALUE) {
      write_dataset(
        runtime_group, "synchronizing fission bank", time_bank.elapsed());
      write_dataset(
        runtime_group, "sampling source sites", time_bank_sample.elapsed());
      write_dataset(
        runtime_group, "SEND-RECV source sites", time_bank_sendrecv.elapsed());
    }
    write_dataset(
      runtime_group, "accumulating tallies", time_tallies.elapsed());
    write_dataset(runtime_group, "total", time_total.elapsed());
    write_dataset(
      runtime_group, "writing statepoints", time_statepoint.elapsed());
    close_group(runtime_group);

    file_close(file_id);
  }

#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  // Write the source bank if desired
  if (write_source_) {
    if (mpi::master || parallel)
      file_id = file_open(filename_, 'a', true);
    write_source_bank(file_id, simulation::source_bank, simulation::work_index);
    if (mpi::master || parallel)
      file_close(file_id);
  }

#if defined(OPENMC_LIBMESH_ENABLED) || defined(OPENMC_DAGMC_ENABLED)
  // write unstructured mesh tally files
  write_unstructured_mesh_results();
#endif

  simulation::time_statepoint.stop();

  return 0;
}
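
// For reference, a minimal sketch of how this C API entry point might be
// driven from user code. The surrounding run setup and teardown are assumed
// and not shown, and the explicit file name below is hypothetical:
//
//   #include "openmc/capi.h"
//
//   // Default name (e.g. statepoint.<batch>.h5), source bank included:
//   openmc_statepoint_write(nullptr, nullptr);
//
//   // Explicit name, skipping the source bank:
//   bool write_source = false;
//   openmc_statepoint_write("my_statepoint.h5", &write_source);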

void restart_set_keff()
{
  if (simulation::restart_batch > settings::n_inactive) {
    for (int i = settings::n_inactive; i < simulation::restart_batch; ++i) {
      simulation::k_sum[0] += simulation::k_generation[i][0];
      simulation::k_sum[1] += std::pow(simulation::k_generation[i][0], 2);
    }
    int n = settings::gen_per_batch * simulation::n_realizations;
    simulation::keff = simulation::k_sum[0] / n;
  } else {
    simulation::keff = simulation::k_generation.back()[0];
  }
}
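
// Restating the arithmetic above (no new behavior): for a restart inside the
// active cycles the loop accumulates
//
//   k_sum[0] = sum over i in [n_inactive, restart_batch) of k_generation[i][0]
//   k_sum[1] = the corresponding sum of squares
//
// and sets keff = k_sum[0] / n with n = gen_per_batch * n_realizations;
// otherwise keff falls back to the most recent generation's estimate.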

void load_state_point()
{
  write_message(
    fmt::format("Loading state point {}...", settings::path_statepoint_c), 5);
  openmc_statepoint_load(settings::path_statepoint.c_str());
}

void statepoint_version_check(hid_t file_id)
{
  // Read revision number for state point file and make sure it matches the
  // current version
  array<int, 2> version_array;
  read_attribute(file_id, "version", version_array);
  if (version_array != VERSION_STATEPOINT) {
    fatal_error(
      "State point version does not match current version in OpenMC.");
  }
}

extern "C" int openmc_statepoint_load(const char* filename)
{
  // Open file for reading
  hid_t file_id = file_open(filename, 'r', true);

  // Read filetype
  std::string word;
  read_attribute(file_id, "filetype", word);
  if (word != "statepoint") {
    fatal_error("OpenMC tried to restart from a non-statepoint file.");
  }

  statepoint_version_check(file_id);

  // Read and overwrite random number seed
  int64_t seed;
  read_dataset(file_id, "seed", seed);
  openmc_set_seed(seed);

  // Read and overwrite random number stride
  uint64_t stride;
  read_dataset(file_id, "stride", stride);
  openmc_set_stride(stride);

  // It is possible for a state point to be generated from a CE run but then
  // loaded into an MG run (or vice versa); check to prevent that.
  read_dataset(file_id, "energy_mode", word);
  if (word == "multi-group" && settings::run_CE) {
    fatal_error("State point file is from multigroup run but current run is "
                "continuous energy.");
  } else if (word == "continuous-energy" && !settings::run_CE) {
    fatal_error("State point file is from continuous-energy run but current "
                "run is multigroup.");
  }

  // Read and overwrite run information except number of batches
  read_dataset(file_id, "run_mode", word);
  if (word == "fixed source") {
    settings::run_mode = RunMode::FIXED_SOURCE;
  } else if (word == "eigenvalue") {
    settings::run_mode = RunMode::EIGENVALUE;
  }
  read_attribute(file_id, "photon_transport", settings::photon_transport);
  read_dataset(file_id, "n_particles", settings::n_particles);
  int temp;
  read_dataset(file_id, "n_batches", temp);

  // Take maximum of statepoint n_batches and input n_batches
  settings::n_batches = std::max(settings::n_batches, temp);

  // Read batch number to restart at
  read_dataset(file_id, "current_batch", simulation::restart_batch);

  if (settings::restart_run &&
      simulation::restart_batch >= settings::n_max_batches) {
    warning(fmt::format(
      "The number of batches specified for simulation ({}) is smaller "
      "than or equal to the number of batches in the restart statepoint file "
      "({})",
      settings::n_max_batches, simulation::restart_batch));
  }

  // Logical flag for source present in statepoint file
  bool source_present;
  read_attribute(file_id, "source_present", source_present);

  // Read information specific to eigenvalue run
  if (settings::run_mode == RunMode::EIGENVALUE) {
    read_dataset(file_id, "n_inactive", temp);
    read_eigenvalue_hdf5(file_id);

    // Take maximum of statepoint n_inactive and input n_inactive
    settings::n_inactive = std::max(settings::n_inactive, temp);

    // Check to make sure source bank is present
    if (settings::path_sourcepoint == settings::path_statepoint &&
        !source_present) {
      fatal_error("Source bank must be contained in statepoint restart file");
    }
  }

  // Read number of realizations for global tallies
  read_dataset(file_id, "n_realizations", simulation::n_realizations);

  // Set k_sum, keff, and current_batch based on whether restart file is part
  // of active cycle or inactive cycle
  if (settings::run_mode == RunMode::EIGENVALUE) {
    restart_set_keff();
  }

  // Set current batch number
  simulation::current_batch = simulation::restart_batch;

  // Read tallies to master. If we are using Parallel HDF5, all processes
  // need to be included in the HDF5 calls.
#ifdef PHDF5
  if (true) {
#else
  if (mpi::master) {
#endif
    // Read global tally data
    read_global_tallies(file_id);

    // Check if tally results are present
    bool present;
    read_attribute(file_id, "tallies_present", present);

    // Read in sum and sum squared
    if (present) {
      hid_t tallies_group = open_group(file_id, "tallies");

      for (auto& tally : model::tallies) {
        // Read sum, sum_sq, and N for each bin
        std::string name = "tally " + std::to_string(tally->id_);
        hid_t tally_group = open_group(tallies_group, name.c_str());

        int internal = 0;
        if (attribute_exists(tally_group, "internal")) {
          read_attribute(tally_group, "internal", internal);
        }
        if (internal) {
          tally->writable_ = false;
        } else {
          auto& results = tally->results_;
          read_tally_results(tally_group, results.shape()[0],
            results.shape()[1], results.data());
          read_dataset(tally_group, "n_realizations", tally->n_realizations_);
          close_group(tally_group);
        }
      }
      close_group(tallies_group);
    }
  }

  // Read source if in eigenvalue mode
  if (settings::run_mode == RunMode::EIGENVALUE) {

    // Check if source was written out separately
    if (!source_present) {

      // Close statepoint file
      file_close(file_id);

      // Write message
      write_message(
        "Loading source file " + settings::path_sourcepoint + "...", 5);

      // Open source file
      file_id = file_open(settings::path_sourcepoint.c_str(), 'r', true);
    }

    // Read source
    read_source_bank(file_id, simulation::source_bank, true);
  }

  // Close file
  file_close(file_id);

  return 0;
}
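
// The matching restart call, under the same assumptions as the write sketch
// after openmc_statepoint_write above (file name hypothetical):
//
//   openmc_statepoint_load("statepoint.0020.h5");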

hid_t h5banktype()
{
  // Create compound type for position
  hid_t postype = H5Tcreate(H5T_COMPOUND, sizeof(struct Position));
  H5Tinsert(postype, "x", HOFFSET(Position, x), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "y", HOFFSET(Position, y), H5T_NATIVE_DOUBLE);
  H5Tinsert(postype, "z", HOFFSET(Position, z), H5T_NATIVE_DOUBLE);

  // Create bank datatype
  //
  // If you make changes to the compound datatype here, make sure you update:
  // - openmc/source.py
  // - openmc/statepoint.py
  // - docs/source/io_formats/statepoint.rst
  // - docs/source/io_formats/source.rst
  hid_t banktype = H5Tcreate(H5T_COMPOUND, sizeof(struct SourceSite));
  H5Tinsert(banktype, "r", HOFFSET(SourceSite, r), postype);
  H5Tinsert(banktype, "u", HOFFSET(SourceSite, u), postype);
  H5Tinsert(banktype, "E", HOFFSET(SourceSite, E), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "time", HOFFSET(SourceSite, time), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "wgt", HOFFSET(SourceSite, wgt), H5T_NATIVE_DOUBLE);
  H5Tinsert(banktype, "delayed_group", HOFFSET(SourceSite, delayed_group),
    H5T_NATIVE_INT);
  H5Tinsert(banktype, "surf_id", HOFFSET(SourceSite, surf_id), H5T_NATIVE_INT);
  H5Tinsert(
    banktype, "particle", HOFFSET(SourceSite, particle), H5T_NATIVE_INT);

  H5Tclose(postype);
  return banktype;
}
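
// The resulting on-disk compound layout, exactly as inserted above, is:
//   r (x, y, z), u (x, y, z), E, time, wgt, delayed_group, surf_id, particle
// This is the member list that dtype_member_names() compares against when a
// source bank is read back in read_source_bank() below.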

void write_source_point(std::string filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index, bool use_mcpl)
{
  std::string ext = use_mcpl ? "mcpl" : "h5";
  write_message("Creating source file {}.{} with {} particles ...", filename,
    ext, source_bank.size(), 5);

  // Dispatch to appropriate function based on file type
  if (use_mcpl) {
    filename.append(".mcpl");
    write_mcpl_source_point(filename.c_str(), source_bank, bank_index);
  } else {
    filename.append(".h5");
    write_h5_source_point(filename.c_str(), source_bank, bank_index);
  }
}

void write_h5_source_point(const char* filename, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  // When using parallel HDF5, the file is written to collectively by all
  // processes. With MPI-only, the file is opened and written by the master
  // (note that the call to write_source_bank is made by all processes since
  // slave processes need to send source bank data to the master).
#ifdef PHDF5
  bool parallel = true;
#else
  bool parallel = false;
#endif

  if (!filename)
    fatal_error("write_source_point filename needs a nonempty name.");

  std::string filename_(filename);
  const auto extension = get_file_extension(filename_);
  if (extension != "h5") {
    warning("write_source_point was passed a file extension differing "
            "from .h5, but an hdf5 file will be written.");
  }

  hid_t file_id;
  if (mpi::master || parallel) {
    file_id = file_open(filename_.c_str(), 'w', true);
    write_attribute(file_id, "filetype", "source");
  }

  // Get pointer to source bank and write to file
  write_source_bank(file_id, source_bank, bank_index);

  if (mpi::master || parallel)
    file_close(file_id);
}

void write_source_bank(hid_t group_id, span<SourceSite> source_bank,
  const vector<int64_t>& bank_index)
{
  hid_t banktype = h5banktype();

  // Set total and individual process dataspace sizes for source bank
  int64_t dims_size = bank_index.back();
  int64_t count_size = bank_index[mpi::rank + 1] - bank_index[mpi::rank];

#ifdef PHDF5
  // Set size of total dataspace for all procs and rank
  hsize_t dims[] {static_cast<hsize_t>(dims_size)};
  hid_t dspace = H5Screate_simple(1, dims, nullptr);
  hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace, H5P_DEFAULT,
    H5P_DEFAULT, H5P_DEFAULT);

  // Create another data space but for each proc individually
  hsize_t count[] {static_cast<hsize_t>(count_size)};
  hid_t memspace = H5Screate_simple(1, count, nullptr);

  // Select hyperslab for this dataspace
  hsize_t start[] {static_cast<hsize_t>(bank_index[mpi::rank])};
  H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

  // Set up the property list for parallel writing
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);

  // Write data to file in parallel
  H5Dwrite(dset, banktype, memspace, dspace, plist, source_bank.data());

  // Free resources
  H5Sclose(dspace);
  H5Sclose(memspace);
  H5Dclose(dset);
  H5Pclose(plist);

#else

  if (mpi::master) {
    // Create dataset big enough to hold all source sites
    hsize_t dims[] {static_cast<hsize_t>(dims_size)};
    hid_t dspace = H5Screate_simple(1, dims, nullptr);
    hid_t dset = H5Dcreate(group_id, "source_bank", banktype, dspace,
      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Save source bank sites since the array is overwritten below
#ifdef OPENMC_MPI
    vector<SourceSite> temp_source {source_bank.begin(), source_bank.end()};
#endif

    for (int i = 0; i < mpi::n_procs; ++i) {
      // Create memory space
      hsize_t count[] {static_cast<hsize_t>(bank_index[i + 1] - bank_index[i])};
      hid_t memspace = H5Screate_simple(1, count, nullptr);

#ifdef OPENMC_MPI
      // Receive source sites from other processes
      if (i > 0)
        MPI_Recv(source_bank.data(), count[0], mpi::source_site, i, i,
          mpi::intracomm, MPI_STATUS_IGNORE);
#endif

      // Select hyperslab for this dataspace
      dspace = H5Dget_space(dset);
      hsize_t start[] {static_cast<hsize_t>(bank_index[i])};
      H5Sselect_hyperslab(
        dspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

      // Write data to hyperslab
      H5Dwrite(
        dset, banktype, memspace, dspace, H5P_DEFAULT, source_bank.data());

      H5Sclose(memspace);
      H5Sclose(dspace);
    }

    // Close all ids
    H5Dclose(dset);

#ifdef OPENMC_MPI
    // Restore state of source bank
    std::copy(temp_source.begin(), temp_source.end(), source_bank.begin());
#endif
  } else {
#ifdef OPENMC_MPI
    MPI_Send(source_bank.data(), count_size, mpi::source_site, 0, mpi::rank,
      mpi::intracomm);
#endif
  }
#endif

  H5Tclose(banktype);
}

// Determine member names of a compound HDF5 datatype
std::string dtype_member_names(hid_t dtype_id)
{
  int nmembers = H5Tget_nmembers(dtype_id);
  std::string names;
  for (int i = 0; i < nmembers; i++) {
    char* name = H5Tget_member_name(dtype_id, i);
    names = names.append(name);
    H5free_memory(name);
    if (i < nmembers - 1)
      names += ", ";
  }
  return names;
}

void read_source_bank(
  hid_t group_id, vector<SourceSite>& sites, bool distribute)
{
  hid_t banktype = h5banktype();

  // Open the dataset
  hid_t dset = H5Dopen(group_id, "source_bank", H5P_DEFAULT);

  // Make sure number of members matches
  hid_t dtype = H5Dget_type(dset);
  auto file_member_names = dtype_member_names(dtype);
  auto bank_member_names = dtype_member_names(banktype);
  if (file_member_names != bank_member_names) {
    fatal_error(fmt::format(
      "Source site attributes in file do not match what is "
      "expected for this version of OpenMC. File attributes = ({}). Expected "
      "attributes = ({})",
      file_member_names, bank_member_names));
  }

  hid_t dspace = H5Dget_space(dset);
  hsize_t n_sites;
  H5Sget_simple_extent_dims(dspace, &n_sites, nullptr);

  // Make sure the vector is big enough for the case where we're reading the
  // entire source on each process
  if (!distribute)
    sites.resize(n_sites);

  hid_t memspace;
  if (distribute) {
    if (simulation::work_index[mpi::n_procs] > n_sites) {
      fatal_error("Number of source sites in source file is less "
                  "than number of source particles per generation.");
    }

    // Create another data space but for each proc individually
    hsize_t n_sites_local = simulation::work_per_rank;
    memspace = H5Screate_simple(1, &n_sites_local, nullptr);

    // Select hyperslab for each process
    hsize_t offset = simulation::work_index[mpi::rank];
    H5Sselect_hyperslab(
      dspace, H5S_SELECT_SET, &offset, nullptr, &n_sites_local, nullptr);
  } else {
    memspace = H5S_ALL;
  }

#ifdef PHDF5
  // Read data in parallel
  hid_t plist = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  H5Dread(dset, banktype, memspace, dspace, plist, sites.data());
  H5Pclose(plist);
#else
  H5Dread(dset, banktype, memspace, dspace, H5P_DEFAULT, sites.data());
#endif

  // Close all ids
  H5Sclose(dspace);
  if (distribute)
    H5Sclose(memspace);
  H5Dclose(dset);
  H5Tclose(banktype);
}

void write_unstructured_mesh_results()
{

  for (auto& tally : model::tallies) {

    vector<std::string> tally_scores;
    for (auto filter_idx : tally->filters()) {
      auto& filter = model::tally_filters[filter_idx];
      if (filter->type() != FilterType::MESH)
        continue;

      // check if the filter uses an unstructured mesh
      auto mesh_filter = dynamic_cast<MeshFilter*>(filter.get());
      auto mesh_idx = mesh_filter->mesh();
      auto umesh =
        dynamic_cast<UnstructuredMesh*>(model::meshes[mesh_idx].get());

      if (!umesh)
        continue;

      if (!umesh->output_)
        continue;

      if (umesh->library() == "moab") {
        if (mpi::master)
          warning(fmt::format(
            "Output for a MOAB mesh (mesh {}) was "
            "requested but will not be written. Please use the Python "
            "API to generate the desired VTK tetrahedral mesh.",
            umesh->id_));
        continue;
      }

      // if this tally has more than one filter, print
      // warning and skip writing the mesh
      if (tally->filters().size() > 1) {
        warning(fmt::format("Skipping unstructured mesh writing for tally "
                            "{}. More than one filter is present on the tally.",
          tally->id_));
        break;
      }

      int n_realizations = tally->n_realizations_;

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));
          // add this score to the mesh
          // (this is in a separate loop because all variables need to be added
          //  to libMesh's equation system before any are initialized, which
          //  happens in set_score_data)
          umesh->add_score(score_str);
        }
      }

      for (int score_idx = 0; score_idx < tally->scores_.size(); score_idx++) {
        for (int nuc_idx = 0; nuc_idx < tally->nuclides_.size(); nuc_idx++) {
          // combine the score and nuclide into a name for the value
          auto score_str = fmt::format("{}_{}", tally->score_name(score_idx),
            tally->nuclide_name(nuc_idx));

          // index for this nuclide and score
          int nuc_score_idx = score_idx + nuc_idx * tally->scores_.size();

          // construct result vectors
          vector<double> mean_vec(umesh->n_bins()),
            std_dev_vec(umesh->n_bins());
          for (int j = 0; j < tally->results_.shape()[0]; j++) {
            // get the volume for this bin
            double volume = umesh->volume(j);
            // compute the mean
            double mean = tally->results_(j, nuc_score_idx, TallyResult::SUM) /
                          n_realizations;
            mean_vec.at(j) = mean / volume;

            // compute the standard deviation
            double sum_sq =
              tally->results_(j, nuc_score_idx, TallyResult::SUM_SQ);
            double std_dev {0.0};
            if (n_realizations > 1) {
              std_dev = sum_sq / n_realizations - mean * mean;
              std_dev = std::sqrt(std_dev / (n_realizations - 1));
            }
            std_dev_vec[j] = std_dev / volume;
          }
#ifdef OPENMC_MPI
          MPI_Bcast(
            mean_vec.data(), mean_vec.size(), MPI_DOUBLE, 0, mpi::intracomm);
          MPI_Bcast(std_dev_vec.data(), std_dev_vec.size(), MPI_DOUBLE, 0,
            mpi::intracomm);
#endif
          // set the data for this score
          umesh->set_score_data(score_str, mean_vec, std_dev_vec);
        }
      }

      // Generate a file name based on the tally id
      // and the current batch number
      size_t batch_width {std::to_string(settings::n_max_batches).size()};
      std::string filename = fmt::format("tally_{0}.{1:0{2}}", tally->id_,
        simulation::current_batch, batch_width);

      // Write the unstructured mesh and data to file
      umesh->write(filename);

      // remove score data added for this mesh write
      umesh->remove_scores();
    }
  }
}
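
// The per-element statistics above follow the usual sample formulas
// (restated here, not new behavior): with n = n_realizations, and sum and
// sum_sq the accumulated score and score squared over realizations,
//
//   mean    = sum / n
//   std_dev = sqrt((sum_sq / n - mean * mean) / (n - 1))
//
// and both are then divided by the element volume before being attached to
// the mesh.
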
void write_global_tallies(hid_t file_id)
{
  // Get global tallies
  auto& gt = simulation::global_tallies;
  auto write_view = xt::view(gt,
    xt::range(static_cast<int>(GlobalTally::K_COLLISION),
      static_cast<int>(GlobalTally::LEAKAGE) + 1),
    xt::range(static_cast<int>(TallyResult::SUM),
      static_cast<int>(TallyResult::SUM_SQ) + 1));
  auto gt_reduced = xt::empty_like(write_view);
  gt_reduced = write_view;
  write_dataset(file_id, "global_tallies", gt_reduced);
}

void read_global_tallies(hid_t file_id)
{
  // Get global tallies
  auto& gt = simulation::global_tallies;
  auto gt_view = xt::view(gt,
    xt::range(static_cast<int>(GlobalTally::K_COLLISION),
      static_cast<int>(GlobalTally::LEAKAGE) + 1),
    xt::range(static_cast<int>(TallyResult::SUM),
      static_cast<int>(TallyResult::SUM_SQ) + 1));
  auto gt_reduced = xt::empty_like(gt_view);
  read_dataset_lowlevel(file_id, "global_tallies", H5T_NATIVE_DOUBLE, H5S_ALL,
    false, gt_reduced.data());
  gt_view = gt_reduced;
}

void write_tally_results_nr(hid_t file_id)
{
  // ==========================================================================
  // COLLECT AND WRITE GLOBAL TALLIES

  hid_t tallies_group;
  if (mpi::master) {
    // Write number of realizations
    write_dataset(file_id, "n_realizations", simulation::n_realizations);

    tallies_group = open_group(file_id, "tallies");
  }

  // Get global tallies
  auto& gt = simulation::global_tallies;

#ifdef OPENMC_MPI
  // Reduce global tallies
  xt::xtensor<double, 2> gt_reduced = xt::empty_like(gt);
  MPI_Reduce(gt.data(), gt_reduced.data(), gt.size(), MPI_DOUBLE, MPI_SUM, 0,
    mpi::intracomm);

  // Transfer reduced values back into the master's copy
  if (mpi::master) {
    if (simulation::current_batch == settings::n_max_batches ||
        simulation::satisfy_triggers) {
      std::copy(gt_reduced.begin(), gt_reduced.end(), gt.begin());
    }
  }
#endif

  // Write out global tallies sum and sum_sq
  if (mpi::master) {
    write_global_tallies(file_id);
  }

  for (const auto& t : model::tallies) {
    // Skip any tallies that are not active
    if (!t->active_)
      continue;
    if (!t->writable_)
      continue;

    if (mpi::master && !attribute_exists(file_id, "tallies_present")) {
      write_attribute(file_id, "tallies_present", 1);
    }

    // Get view of accumulated tally values
    auto values_view = xt::view(t->results_, xt::all(), xt::all(),
      xt::range(static_cast<int>(TallyResult::SUM),
        static_cast<int>(TallyResult::SUM_SQ) + 1));

    // Make copy of tally values in contiguous array
    xt::xtensor<double, 3> values = values_view;

    if (mpi::master) {
      // Open group for tally
      std::string groupname {"tally " + std::to_string(t->id_)};
      hid_t tally_group = open_group(tallies_group, groupname.c_str());

      // The MPI_IN_PLACE specifier allows the master to copy values into
      // a receive buffer without having a temporary variable
#ifdef OPENMC_MPI
      MPI_Reduce(MPI_IN_PLACE, values.data(), values.size(), MPI_DOUBLE,
        MPI_SUM, 0, mpi::intracomm);
#endif

      // At the end of the simulation, store the results back in the
      // regular TallyResults array
      if (simulation::current_batch == settings::n_max_batches ||
          simulation::satisfy_triggers) {
        values_view = values;
      }

      // Put in temporary tally result
      xt::xtensor<double, 3> results_copy = xt::zeros_like(t->results_);
      auto copy_view = xt::view(results_copy, xt::all(), xt::all(),
        xt::range(static_cast<int>(TallyResult::SUM),
          static_cast<int>(TallyResult::SUM_SQ) + 1));
      copy_view = values;

      // Write reduced tally results to file
      auto shape = results_copy.shape();
      write_tally_results(tally_group, shape[0], shape[1], results_copy.data());

      close_group(tally_group);
    } else {
      // The receive buffer is not significant on other processes
#ifdef OPENMC_MPI
      MPI_Reduce(values.data(), nullptr, values.size(), MPI_DOUBLE, MPI_SUM, 0,
        mpi::intracomm);
#endif
    }
  }

  if (mpi::master) {
    if (!object_exists(file_id, "tallies_present")) {
      // Indicate that tallies are off
      write_dataset(file_id, "tallies_present", 0);
    }

    close_group(tallies_group);
  }
}

} // namespace openmc