openmc-dev / openmc, build 9966309227 (push, via github / web-flow)

17 Jul 2024 12:53AM UTC, coverage: 84.815% (+0.008%) from 84.807%

Commit: Linear Source Random Ray (#3072)

Co-authored-by: John Tramm <john.tramm@gmail.com>
Co-authored-by: Paul Romano <paul.k.romano@gmail.com>

336 of 369 new or added lines in 9 files covered (91.06%).
36 existing lines in 4 files are now uncovered.
49336 of 58169 relevant lines covered (84.81%).
32020493.4 hits per line.

Source file: /src/random_ray/linear_source_domain.cpp (93.8% covered)

#include "openmc/random_ray/linear_source_domain.h"

#include "openmc/cell.h"
#include "openmc/geometry.h"
#include "openmc/material.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/output.h"
#include "openmc/plot.h"
#include "openmc/random_ray/random_ray.h"
#include "openmc/simulation.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/tally.h"
#include "openmc/tallies/tally_scoring.h"
#include "openmc/timer.h"

namespace openmc {

//==============================================================================
// LinearSourceDomain implementation
//==============================================================================

LinearSourceDomain::LinearSourceDomain() : FlatSourceDomain()
{
  // First order spatial moment of the scalar flux
  flux_moments_old_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  flux_moments_new_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  flux_moments_t_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  // Source gradients given by M inverse multiplied by source moments
  source_gradients_.assign(n_source_elements_, {0.0, 0.0, 0.0});

  centroid_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  centroid_iteration_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  centroid_t_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  mom_matrix_.assign(n_source_regions_, {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
  mom_matrix_t_.assign(n_source_regions_, {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
}

void LinearSourceDomain::batch_reset()
{
  FlatSourceDomain::batch_reset();
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    flux_moments_new_[se] = {0.0, 0.0, 0.0};
  }
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    centroid_iteration_[sr] = {0.0, 0.0, 0.0};
    mom_matrix_[sr] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
  }
}

void LinearSourceDomain::update_neutron_source(double k_eff)
{
  simulation::time_update_src.start();

  double inverse_k_eff = 1.0 / k_eff;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {

    int material = material_[sr];
    MomentMatrix invM = mom_matrix_[sr].inverse();

    for (int e_out = 0; e_out < negroups_; e_out++) {
      float sigma_t = data::mg.macro_xs_[material].get_xs(
        MgxsType::TOTAL, e_out, nullptr, nullptr, nullptr, t, a);

      float scatter_flat = 0.0f;
      float fission_flat = 0.0f;
      MomentArray scatter_linear = {0.0, 0.0, 0.0};
      MomentArray fission_linear = {0.0, 0.0, 0.0};

      for (int e_in = 0; e_in < negroups_; e_in++) {
        // Handles for the flat and linear components of the flux
        float flux_flat = scalar_flux_old_[sr * negroups_ + e_in];
        MomentArray flux_linear = flux_moments_old_[sr * negroups_ + e_in];

        // Handles for cross sections
        float sigma_s = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_SCATTER, e_in, &e_out, nullptr, nullptr, t, a);
        float nu_sigma_f = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_FISSION, e_in, nullptr, nullptr, nullptr, t, a);
        float chi = data::mg.macro_xs_[material].get_xs(
          MgxsType::CHI_PROMPT, e_in, &e_out, nullptr, nullptr, t, a);

        // Compute source terms for flat and linear components of the flux
        scatter_flat += sigma_s * flux_flat;
        fission_flat += nu_sigma_f * flux_flat * chi;
        scatter_linear += sigma_s * flux_linear;
        fission_linear += nu_sigma_f * flux_linear * chi;
      }

      // Compute the flat source term
      source_[sr * negroups_ + e_out] =
        (scatter_flat + fission_flat * inverse_k_eff) / sigma_t;

      // Compute the linear source terms
      if (simulation::current_batch > 2) {
        source_gradients_[sr * negroups_ + e_out] =
          invM * ((scatter_linear + fission_linear * inverse_k_eff) / sigma_t);
      }
    }
  }

  if (settings::run_mode == RunMode::FIXED_SOURCE) {
// Add external source to flat source term if in fixed source mode
#pragma omp parallel for
    for (int se = 0; se < n_source_elements_; se++) {
      source_[se] += external_source_[se];
    }
  }

  simulation::time_update_src.stop();
}

void LinearSourceDomain::normalize_scalar_flux_and_volumes(
  double total_active_distance_per_iteration)
{
  float normalization_factor = 1.0 / total_active_distance_per_iteration;
  double volume_normalization_factor =
    1.0 / (total_active_distance_per_iteration * simulation::current_batch);

// Normalize flux to total distance travelled by all rays this iteration
#pragma omp parallel for
  for (int64_t e = 0; e < scalar_flux_new_.size(); e++) {
    scalar_flux_new_[e] *= normalization_factor;
    flux_moments_new_[e] *= normalization_factor;
  }

// Accumulate cell-wise ray length tallies collected this iteration, then
// update the simulation-averaged cell-wise volume estimates
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    centroid_t_[sr] += centroid_iteration_[sr];
    mom_matrix_t_[sr] += mom_matrix_[sr];
    volume_t_[sr] += volume_[sr];
    volume_[sr] = volume_t_[sr] * volume_normalization_factor;
    if (volume_t_[sr] > 0.0) {
      double inv_volume = 1.0 / volume_t_[sr];
      centroid_[sr] = centroid_t_[sr];
      centroid_[sr] *= inv_volume;
      mom_matrix_[sr] = mom_matrix_t_[sr];
      mom_matrix_[sr] *= inv_volume;
    }
  }
}

int64_t LinearSourceDomain::add_source_to_scalar_flux()
{
  int64_t n_hits = 0;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for reduction(+ : n_hits)
  for (int sr = 0; sr < n_source_regions_; sr++) {

    double volume = volume_[sr];
    int material = material_[sr];

    // Check if this cell was hit this iteration
    int was_cell_hit = was_hit_[sr];
    if (was_cell_hit) {
      n_hits++;
    }

    for (int g = 0; g < negroups_; g++) {
      int64_t idx = (sr * negroups_) + g;
      // There are three scenarios we need to consider:
      if (was_cell_hit) {
        // 1. If the FSR was hit this iteration, then the new flux is equal to
        // the flat source from the previous iteration plus the contributions
        // from rays passing through the source region (computed during the
        // transport sweep)
        scalar_flux_new_[idx] /= volume;
        scalar_flux_new_[idx] += source_[idx];
        flux_moments_new_[idx] *= (1.0 / volume);
      } else if (volume > 0.0) {
        // 2. If the FSR was not hit this iteration, but has been hit some
        // previous iteration, then we simply set the new scalar flux to be
        // equal to the contribution from the flat source alone.
        scalar_flux_new_[idx] = source_[idx];
      } else {
        // 3. If the FSR was not hit this iteration, and it has never been hit
        // in any iteration (i.e., volume is zero), then we want to set this to
        // 0 to avoid dividing anything by a zero volume.
        scalar_flux_new_[idx] = 0.0f;
        flux_moments_new_[idx] *= 0.0;
      }
    }
  }

  return n_hits;
}

void LinearSourceDomain::flux_swap()
{
  FlatSourceDomain::flux_swap();
  flux_moments_old_.swap(flux_moments_new_);
}

void LinearSourceDomain::accumulate_iteration_flux()
{
  // Accumulate scalar flux
  FlatSourceDomain::accumulate_iteration_flux();

  // Accumulate scalar flux moments
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    flux_moments_t_[se] += flux_moments_new_[se];
  }
}

void LinearSourceDomain::all_reduce_replicated_source_regions()
{
#ifdef OPENMC_MPI
  FlatSourceDomain::all_reduce_replicated_source_regions();
  simulation::time_bank_sendrecv.start();

  // We are going to assume we can safely cast Position, MomentArray,
  // and MomentMatrix to contiguous arrays of doubles for the MPI
  // allreduce operation. This is a safe assumption as typically
  // compilers will at most pad to 8 byte boundaries. If a new FP32 MomentArray
  // type is introduced, then there will likely be padding, in which case this
  // function will need to become more complex.
  if (sizeof(MomentArray) != 3 * sizeof(double) ||
      sizeof(MomentMatrix) != 6 * sizeof(double)) {
    fatal_error("Unexpected buffer padding in linear source domain reduction.");
  }

  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(flux_moments_new_.data()),
    n_source_elements_ * 3, MPI_DOUBLE, MPI_SUM, mpi::intracomm);
  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(mom_matrix_.data()),
    n_source_regions_ * 6, MPI_DOUBLE, MPI_SUM, mpi::intracomm);
  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(centroid_iteration_.data()),
    n_source_regions_ * 3, MPI_DOUBLE, MPI_SUM, mpi::intracomm);

  simulation::time_bank_sendrecv.stop();
#endif
}

double LinearSourceDomain::evaluate_flux_at_point(
  Position r, int64_t sr, int g) const
{
  // Flat (region-average) flux component from the base class
  float phi_flat = FlatSourceDomain::evaluate_flux_at_point(r, sr, g);

  // Position of the point relative to the source region centroid
  Position local_r = r - centroid_[sr];

  // Batch-averaged flux spatial moments for this region and group
  MomentArray phi_linear = flux_moments_t_[sr * negroups_ + g];
  phi_linear *= 1.0 / (settings::n_batches - settings::n_inactive);

  // Solve for the flux gradient and add its contribution at the local point
  MomentMatrix invM = mom_matrix_[sr].inverse();
  MomentArray phi_solved = invM * phi_linear;

  return phi_flat + phi_solved.dot(local_r);
}

} // namespace openmc
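
For readers unfamiliar with the reconstruction performed in evaluate_flux_at_point above, the sketch below is a minimal standalone illustration of the same idea, not OpenMC code: the Vec3 and Sym3 names are hypothetical stand-ins for Position, MomentArray, and MomentMatrix. The point-wise flux is the flat region-average value plus the dot product of the solved gradient, M^-1 m, with the position relative to the region centroid.

// Standalone sketch (hypothetical types, not OpenMC's API): reconstruct a
// point-wise flux as phi(r) = phi_flat + (M^-1 m) . (r - c), where M is the
// symmetric spatial moment matrix of a region, m is the flux spatial-moment
// vector, and c is the region centroid.
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;

// Symmetric 3x3 matrix stored as its six unique entries:
// {a, b, c, d, e, f} = {xx, xy, xz, yy, yz, zz}
struct Sym3 {
  double a, b, c, d, e, f;

  // Inverse via the adjugate of the symmetric matrix
  Sym3 inverse() const
  {
    double det =
      a * (d * f - e * e) - b * (b * f - c * e) + c * (b * e - c * d);
    double inv_det = 1.0 / det;
    return {(d * f - e * e) * inv_det, (c * e - b * f) * inv_det,
      (b * e - c * d) * inv_det, (a * f - c * c) * inv_det,
      (b * c - a * e) * inv_det, (a * d - b * b) * inv_det};
  }

  // Matrix-vector product
  Vec3 operator*(const Vec3& v) const
  {
    return {a * v[0] + b * v[1] + c * v[2], b * v[0] + d * v[1] + e * v[2],
      c * v[0] + e * v[1] + f * v[2]};
  }
};

double dot(const Vec3& u, const Vec3& v)
{
  return u[0] * v[0] + u[1] * v[1] + u[2] * v[2];
}

int main()
{
  double phi_flat = 1.0;                    // flat (region-average) flux
  Vec3 centroid = {0.5, 0.5, 0.5};          // region centroid
  Vec3 flux_moments = {0.02, -0.01, 0.005}; // flux spatial moments
  Sym3 M = {0.1, 0.0, 0.0, 0.1, 0.0, 0.1};  // spatial moment matrix

  Vec3 r = {0.6, 0.4, 0.5};                 // evaluation point
  Vec3 local_r = {r[0] - centroid[0], r[1] - centroid[1], r[2] - centroid[2]};

  // Solve M * grad = m for the flux gradient, then add the linear term
  Vec3 grad = M.inverse() * flux_moments;
  double phi = phi_flat + dot(grad, local_r);

  std::printf("phi(r) = %f\n", phi);
  return 0;
}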