• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 23010841626

12 Mar 2026 03:50PM UTC coverage: 81.015% (-0.6%) from 81.566%
23010841626

Pull #3863

github

web-flow
Merge 954a87042 into ba94c5823
Pull Request #3863: Shared Secondary Particle Bank

16912 of 24191 branches covered (69.91%)

Branch coverage included in aggregate %.

323 of 429 new or added lines in 17 files covered. (75.29%)

577 existing lines in 39 files now uncovered.

56865 of 66875 relevant lines covered (85.03%)

32719674.03 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/src/event.cpp
1
#include "openmc/event.h"
2

3
#include "openmc/bank.h"
4
#include "openmc/error.h"
5
#include "openmc/material.h"
6
#include "openmc/settings.h"
7
#include "openmc/simulation.h"
8
#include "openmc/timer.h"
9

10
namespace openmc {
11

12
//==============================================================================
13
// Global variables
14
//==============================================================================
15

16
namespace simulation {

// Event queues for the event-based transport loop. Each item pairs a
// particle with its index into simulation::particles; capacity for each
// queue is reserved up front in init_event_queues() so thread_safe_append()
// can run inside parallel regions without reallocating.
SharedArray<EventQueueItem> calculate_fuel_xs_queue;
SharedArray<EventQueueItem> calculate_nonfuel_xs_queue;
SharedArray<EventQueueItem> advance_particle_queue;
SharedArray<EventQueueItem> surface_crossing_queue;
SharedArray<EventQueueItem> collision_queue;

// Buffer of in-flight particles; queue items reference entries here by index.
vector<Particle> particles;

} // namespace simulation
27

28
//==============================================================================
29
// Non-member functions
30
//==============================================================================
31

UNCOV
32
void init_event_queues(int64_t n_particles)
×
33
{
UNCOV
34
  simulation::calculate_fuel_xs_queue.reserve(n_particles);
×
UNCOV
35
  simulation::calculate_nonfuel_xs_queue.reserve(n_particles);
×
UNCOV
36
  simulation::advance_particle_queue.reserve(n_particles);
×
UNCOV
37
  simulation::surface_crossing_queue.reserve(n_particles);
×
UNCOV
38
  simulation::collision_queue.reserve(n_particles);
×
39

UNCOV
40
  simulation::particles.resize(n_particles);
×
UNCOV
41
}
×
42

43
void free_event_queues(void)
×
44
{
45
  simulation::calculate_fuel_xs_queue.clear();
×
46
  simulation::calculate_nonfuel_xs_queue.clear();
×
47
  simulation::advance_particle_queue.clear();
×
48
  simulation::surface_crossing_queue.clear();
×
49
  simulation::collision_queue.clear();
×
50

51
  simulation::particles.clear();
×
52
}
×
53

UNCOV
54
void dispatch_xs_event(int64_t buffer_idx)
×
55
{
UNCOV
56
  Particle& p = simulation::particles[buffer_idx];
×
UNCOV
57
  if (p.material() == MATERIAL_VOID ||
×
UNCOV
58
      !model::materials[p.material()]->fissionable()) {
×
UNCOV
59
    simulation::calculate_nonfuel_xs_queue.thread_safe_append({p, buffer_idx});
×
60
  } else {
UNCOV
61
    simulation::calculate_fuel_xs_queue.thread_safe_append({p, buffer_idx});
×
62
  }
UNCOV
63
}
×
64

UNCOV
65
void process_init_events(int64_t n_particles, int64_t source_offset)
×
66
{
UNCOV
67
  simulation::time_event_init.start();
×
68
#pragma omp parallel for schedule(runtime)
69
  for (int64_t i = 0; i < n_particles; i++) {
×
70
    initialize_history(simulation::particles[i], source_offset + i + 1, false);
71
    dispatch_xs_event(i);
72
  }
UNCOV
73
  simulation::time_event_init.stop();
×
UNCOV
74
}
×
75

UNCOV
76
//! Execute a cross-section calculation event for every particle in `queue`,
//! then move all of them to the advance-particle queue and empty `queue`.
//!
//! \param queue Either the fuel or non-fuel XS queue.
void process_calculate_xs_events(SharedArray<EventQueueItem>& queue)
{
  simulation::time_event_calculate_xs.start();

  // TODO: If using C++17, we could perform a parallel sort of the queue by
  // particle type, material type, and then energy, in order to improve cache
  // locality and reduce thread divergence on GPU. However, the parallel
  // algorithms typically require linking against an additional library (Intel
  // TBB). Prior to C++17, std::sort is a serial only operation, which in this
  // case makes it too slow to be practical for most test problems.
  //
  // std::sort(std::execution::par_unseq, queue.data(), queue.data() +
  // queue.size());

  // Items are appended starting at the current end of the destination queue.
  int64_t offset = simulation::advance_particle_queue.size();

#pragma omp parallel for schedule(runtime)
  for (int64_t i = 0; i < queue.size(); i++) {
    Particle* p = &simulation::particles[queue[i].idx];
    p->event_calculate_xs();

    // After executing a calculate_xs event, particles will
    // always require an advance event. Therefore, we don't need to use
    // the protected enqueuing function.
    // NOTE(review): this writes past the destination's current size() but
    // within its reserved capacity (reserved in init_event_queues); the
    // resize() below publishes the new length — presumably SharedArray
    // permits this pattern. TODO confirm against SharedArray's contract.
    simulation::advance_particle_queue[offset + i] = queue[i];
  }

  // Publish the elements written directly above.
  simulation::advance_particle_queue.resize(offset + queue.size());

  queue.resize(0);

  simulation::time_event_calculate_xs.stop();
}
×
109

UNCOV
110
void process_advance_particle_events()
×
111
{
UNCOV
112
  simulation::time_event_advance_particle.start();
×
113

114
#pragma omp parallel for schedule(runtime)
115
  for (int64_t i = 0; i < simulation::advance_particle_queue.size(); i++) {
×
116
    int64_t buffer_idx = simulation::advance_particle_queue[i].idx;
117
    Particle& p = simulation::particles[buffer_idx];
118
    p.event_advance();
119
    if (!p.alive())
×
120
      continue;
121
    if (p.collision_distance() > p.boundary().distance()) {
×
122
      simulation::surface_crossing_queue.thread_safe_append({p, buffer_idx});
123
    } else {
124
      simulation::collision_queue.thread_safe_append({p, buffer_idx});
125
    }
126
  }
127

UNCOV
128
  simulation::advance_particle_queue.resize(0);
×
129

UNCOV
130
  simulation::time_event_advance_particle.stop();
×
UNCOV
131
}
×
132

UNCOV
133
void process_surface_crossing_events()
×
134
{
UNCOV
135
  simulation::time_event_surface_crossing.start();
×
136

137
#pragma omp parallel for schedule(runtime)
138
  for (int64_t i = 0; i < simulation::surface_crossing_queue.size(); i++) {
×
139
    int64_t buffer_idx = simulation::surface_crossing_queue[i].idx;
140
    Particle& p = simulation::particles[buffer_idx];
141
    p.event_cross_surface();
142
    p.event_check_limit_and_revive();
143
    if (p.alive())
×
144
      dispatch_xs_event(buffer_idx);
145
  }
146

UNCOV
147
  simulation::surface_crossing_queue.resize(0);
×
148

UNCOV
149
  simulation::time_event_surface_crossing.stop();
×
UNCOV
150
}
×
151

UNCOV
152
void process_collision_events()
×
153
{
UNCOV
154
  simulation::time_event_collision.start();
×
155

156
#pragma omp parallel for schedule(runtime)
157
  for (int64_t i = 0; i < simulation::collision_queue.size(); i++) {
×
158
    int64_t buffer_idx = simulation::collision_queue[i].idx;
159
    Particle& p = simulation::particles[buffer_idx];
160
    p.event_collide();
161
    p.event_check_limit_and_revive();
162
    if (p.alive())
×
163
      dispatch_xs_event(buffer_idx);
164
  }
165

UNCOV
166
  simulation::collision_queue.resize(0);
×
167

UNCOV
168
  simulation::time_event_collision.stop();
×
UNCOV
169
}
×
170

UNCOV
171
void process_death_events(int64_t n_particles)
×
172
{
UNCOV
173
  simulation::time_event_death.start();
×
174
#pragma omp parallel for schedule(runtime)
175
  for (int64_t i = 0; i < n_particles; i++) {
×
176
    Particle& p = simulation::particles[i];
177
    p.event_death();
178
  }
UNCOV
179
  simulation::time_event_death.stop();
×
UNCOV
180
}
×
181

NEW
182
void process_transport_events()
×
183
{
NEW
184
  while (true) {
×
NEW
185
    int64_t max = std::max({simulation::calculate_fuel_xs_queue.size(),
×
NEW
186
      simulation::calculate_nonfuel_xs_queue.size(),
×
NEW
187
      simulation::advance_particle_queue.size(),
×
NEW
188
      simulation::surface_crossing_queue.size(),
×
NEW
189
      simulation::collision_queue.size()});
×
190

NEW
191
    if (max == 0) {
×
192
      break;
NEW
193
    } else if (max == simulation::calculate_fuel_xs_queue.size()) {
×
NEW
194
      process_calculate_xs_events(simulation::calculate_fuel_xs_queue);
×
NEW
195
    } else if (max == simulation::calculate_nonfuel_xs_queue.size()) {
×
NEW
196
      process_calculate_xs_events(simulation::calculate_nonfuel_xs_queue);
×
NEW
197
    } else if (max == simulation::advance_particle_queue.size()) {
×
NEW
198
      process_advance_particle_events();
×
NEW
199
    } else if (max == simulation::surface_crossing_queue.size()) {
×
NEW
200
      process_surface_crossing_events();
×
NEW
201
    } else if (max == simulation::collision_queue.size()) {
×
NEW
202
      process_collision_events();
×
203
    }
204
  }
NEW
205
}
×
206

NEW
207
void process_init_secondary_events(int64_t n_particles, int64_t offset,
×
208
  SharedArray<SourceSite>& shared_secondary_bank)
209
{
NEW
210
  simulation::time_event_init.start();
×
211
#pragma omp parallel for schedule(runtime)
212
  for (int64_t i = 0; i < n_particles; i++) {
×
213
    initialize_history(simulation::particles[i], offset + i + 1, true);
214
    SourceSite& site = shared_secondary_bank[offset + i];
215
    simulation::particles[i].event_revive_from_secondary(site);
216
    if (simulation::particles[i].alive()) {
×
217
      dispatch_xs_event(i);
218
    }
219
  }
NEW
220
  simulation::time_event_init.stop();
×
NEW
221
}
×
222

223
} // namespace openmc
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc