openmc-dev / openmc, build 10586562087
27 Aug 2024 10:05PM UTC coverage: 84.707% (-0.2%) from 84.9%
Pull Request #3112: Revamp CI with dependency and Python caching for efficient installs (merge f7f32bf18 into 5bc04b5d7)
49553 of 58499 relevant lines covered (84.71%)
34324762.08 hits per line

Source file: /src/random_ray/linear_source_domain.cpp (90.52% of lines covered)

#include "openmc/random_ray/linear_source_domain.h"

#include "openmc/cell.h"
#include "openmc/geometry.h"
#include "openmc/material.h"
#include "openmc/message_passing.h"
#include "openmc/mgxs_interface.h"
#include "openmc/output.h"
#include "openmc/plot.h"
#include "openmc/random_ray/random_ray.h"
#include "openmc/simulation.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/tally.h"
#include "openmc/tallies/tally_scoring.h"
#include "openmc/timer.h"

namespace openmc {

//==============================================================================
// LinearSourceDomain implementation
//==============================================================================

LinearSourceDomain::LinearSourceDomain() : FlatSourceDomain()
{
  // First-order spatial moments of the scalar flux
  flux_moments_old_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  flux_moments_new_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  flux_moments_t_.assign(n_source_elements_, {0.0, 0.0, 0.0});
  // Source gradients, given by M inverse multiplied by the source moments
  source_gradients_.assign(n_source_elements_, {0.0, 0.0, 0.0});

  centroid_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  centroid_iteration_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  centroid_t_.assign(n_source_regions_, {0.0, 0.0, 0.0});
  mom_matrix_.assign(n_source_regions_, {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
  mom_matrix_t_.assign(n_source_regions_, {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
}

void LinearSourceDomain::batch_reset()
{
  FlatSourceDomain::batch_reset();
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    flux_moments_new_[se] = {0.0, 0.0, 0.0};
  }
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    centroid_iteration_[sr] = {0.0, 0.0, 0.0};
    mom_matrix_[sr] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
  }
}

void LinearSourceDomain::update_neutron_source(double k_eff)
{
  simulation::time_update_src.start();

  double inverse_k_eff = 1.0 / k_eff;

  // Temperature and angle indices, if using multiple temperature
  // data sets and/or anisotropic data sets.
  // TODO: Currently assumes we are only using single temp/single
  // angle data.
  const int t = 0;
  const int a = 0;

#pragma omp parallel for
  for (int sr = 0; sr < n_source_regions_; sr++) {

    int material = material_[sr];
    MomentMatrix invM = mom_matrix_[sr].inverse();

    for (int e_out = 0; e_out < negroups_; e_out++) {
      double sigma_t = data::mg.macro_xs_[material].get_xs(
        MgxsType::TOTAL, e_out, nullptr, nullptr, nullptr, t, a);

      double scatter_flat = 0.0;
      double fission_flat = 0.0;
      MomentArray scatter_linear = {0.0, 0.0, 0.0};
      MomentArray fission_linear = {0.0, 0.0, 0.0};

      for (int e_in = 0; e_in < negroups_; e_in++) {
        // Handles for the flat and linear components of the flux
        double flux_flat = scalar_flux_old_[sr * negroups_ + e_in];
        MomentArray flux_linear = flux_moments_old_[sr * negroups_ + e_in];

        // Handles for cross sections
        double sigma_s = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_SCATTER, e_in, &e_out, nullptr, nullptr, t, a);
        double nu_sigma_f = data::mg.macro_xs_[material].get_xs(
          MgxsType::NU_FISSION, e_in, nullptr, nullptr, nullptr, t, a);
        double chi = data::mg.macro_xs_[material].get_xs(
          MgxsType::CHI_PROMPT, e_in, &e_out, nullptr, nullptr, t, a);

        // Accumulate source terms for the flat and linear components of the
        // flux
        scatter_flat += sigma_s * flux_flat;
        fission_flat += nu_sigma_f * flux_flat * chi;
        scatter_linear += sigma_s * flux_linear;
        fission_linear += nu_sigma_f * flux_linear * chi;
      }

      // Compute the flat source term
      source_[sr * negroups_ + e_out] =
        (scatter_flat + fission_flat * inverse_k_eff) / sigma_t;

      // Compute the linear source terms. In the first 10 iterations, when
      // the centroids and spatial moments are not yet well converged, the
      // source gradients are left at zero to avoid numerical instability.
      if (simulation::current_batch > 10) {
        source_gradients_[sr * negroups_ + e_out] =
          invM * ((scatter_linear + fission_linear * inverse_k_eff) / sigma_t);
      }
    }
  }

  if (settings::run_mode == RunMode::FIXED_SOURCE) {
// Add the external source to the flat source term in fixed source mode
#pragma omp parallel for
    for (int se = 0; se < n_source_elements_; se++) {
      source_[se] += external_source_[se];
    }
  }

  simulation::time_update_src.stop();
}
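
// In equation form, the loops above evaluate, for each source region and
// outgoing group g (notation ad hoc; phihat denotes the first-order flux
// moment vector and M the region's spatial moment matrix):
//
//   q_g    = [ sum_g' (Sigma_s,g'->g + chi_g nu*Sigma_f,g' / k_eff) phi_g' ] / Sigma_t,g
//   qhat_g = M^{-1} [ sum_g' (Sigma_s,g'->g + chi_g nu*Sigma_f,g' / k_eff) phihat_g' ] / Sigma_t,g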

void LinearSourceDomain::normalize_scalar_flux_and_volumes(
  double total_active_distance_per_iteration)
{
  double normalization_factor = 1.0 / total_active_distance_per_iteration;
  double volume_normalization_factor =
    1.0 / (total_active_distance_per_iteration * simulation::current_batch);

// Normalize the flux to the total distance travelled by all rays this
// iteration
#pragma omp parallel for
  for (int64_t e = 0; e < scalar_flux_new_.size(); e++) {
    scalar_flux_new_[e] *= normalization_factor;
    flux_moments_new_[e] *= normalization_factor;
  }

// Accumulate the cell-wise ray length tallies collected this iteration, then
// update the simulation-averaged cell-wise volume estimates
#pragma omp parallel for
  for (int64_t sr = 0; sr < n_source_regions_; sr++) {
    centroid_t_[sr] += centroid_iteration_[sr];
    mom_matrix_t_[sr] += mom_matrix_[sr];
    volume_t_[sr] += volume_[sr];
    volume_naive_[sr] = volume_[sr] * normalization_factor;
    volume_[sr] = volume_t_[sr] * volume_normalization_factor;
    if (volume_t_[sr] > 0.0) {
      double inv_volume = 1.0 / volume_t_[sr];
      centroid_[sr] = centroid_t_[sr];
      centroid_[sr] *= inv_volume;
      mom_matrix_[sr] = mom_matrix_t_[sr];
      mom_matrix_[sr] *= inv_volume;
    }
  }
}
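
// Note the two volume estimators produced above: volume_naive_ holds the
// single-iteration estimate (this iteration's tracklength sum divided by the
// total active ray distance), while volume_ holds the simulation-averaged
// estimate accumulated in volume_t_ over all batches so far. Centroids and
// moment matrices are likewise normalized by the accumulated volume.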

void LinearSourceDomain::set_flux_to_flux_plus_source(
  int64_t idx, double volume, int material, int g)
{
  // material and g are unused here; the signature presumably matches the
  // corresponding FlatSourceDomain method
  scalar_flux_new_[idx] /= volume;
  scalar_flux_new_[idx] += source_[idx];
  flux_moments_new_[idx] *= (1.0 / volume);
}
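
// In equation form: phi_new,g <- phi_new,g / V + q_g for the flat component,
// while the flux moment vector is only rescaled by 1/V, with no source term
// added at this stage.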

void LinearSourceDomain::set_flux_to_old_flux(int64_t idx)
{
  scalar_flux_new_[idx] = scalar_flux_old_[idx];
  flux_moments_new_[idx] = flux_moments_old_[idx];
}

void LinearSourceDomain::flux_swap()
{
  FlatSourceDomain::flux_swap();
  flux_moments_old_.swap(flux_moments_new_);
}

void LinearSourceDomain::accumulate_iteration_flux()
{
  // Accumulate the scalar flux
  FlatSourceDomain::accumulate_iteration_flux();

  // Accumulate the scalar flux moments
#pragma omp parallel for
  for (int64_t se = 0; se < n_source_elements_; se++) {
    flux_moments_t_[se] += flux_moments_new_[se];
  }
}

void LinearSourceDomain::all_reduce_replicated_source_regions()
{
#ifdef OPENMC_MPI
  FlatSourceDomain::all_reduce_replicated_source_regions();
  simulation::time_bank_sendrecv.start();

  // We assume that Position, MomentArray, and MomentMatrix can be safely
  // cast to contiguous arrays of doubles for the MPI allreduce operation.
  // This is a safe assumption, as compilers will typically pad at most to
  // 8-byte boundaries. If a new FP32 MomentArray type is introduced, there
  // will likely be padding, in which case this function will need to become
  // more complex.
  if (sizeof(MomentArray) != 3 * sizeof(double) ||
      sizeof(MomentMatrix) != 6 * sizeof(double)) {
    fatal_error("Unexpected buffer padding in linear source domain reduction.");
  }

  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(flux_moments_new_.data()),
    n_source_elements_ * 3, MPI_DOUBLE, MPI_SUM, mpi::intracomm);
  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(mom_matrix_.data()),
    n_source_regions_ * 6, MPI_DOUBLE, MPI_SUM, mpi::intracomm);
  MPI_Allreduce(MPI_IN_PLACE, static_cast<void*>(centroid_iteration_.data()),
    n_source_regions_ * 3, MPI_DOUBLE, MPI_SUM, mpi::intracomm);

  simulation::time_bank_sendrecv.stop();
#endif
}
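
// Since both size conditions are compile-time constants, the same guard
// could be expressed as a static_assert (a sketch; the file itself uses the
// runtime check above):
//
//   static_assert(sizeof(MomentArray) == 3 * sizeof(double) &&
//                   sizeof(MomentMatrix) == 6 * sizeof(double),
//     "MomentArray and MomentMatrix must pack as contiguous doubles for "
//     "MPI_Allreduce");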

double LinearSourceDomain::evaluate_flux_at_point(
  Position r, int64_t sr, int g) const
{
  double phi_flat = FlatSourceDomain::evaluate_flux_at_point(r, sr, g);

  Position local_r = r - centroid_[sr];
  MomentArray phi_linear = flux_moments_t_[sr * negroups_ + g];
  phi_linear *= 1.0 / (settings::n_batches - settings::n_inactive);

  MomentMatrix invM = mom_matrix_[sr].inverse();
  MomentArray phi_solved = invM * phi_linear;

  return phi_flat + phi_solved.dot(local_r);
}
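
// In equation form, the reconstruction above is
//
//   phi_g(r) = phi_flat,g + (M^{-1} phihat_g) . (r - r_C)
//
// where r_C is the region centroid and phihat_g the accumulated flux moments
// averaged over the active batches.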

} // namespace openmc