celerity / celerity-runtime / build 11854130628 (push via github)

15 Nov 2024 09:58AM UTC coverage: 95.102% (-0.06%) from 95.163%
psalz: Update benchmark results for buffer_access_map refactor

2992 of 3394 branches covered (88.16%); branch coverage is included in the aggregate %.
6677 of 6773 relevant lines covered (98.58%)
1294452.81 hits per line

Source file: /include/handler.h (97.23% covered)
#pragma once

#include <memory>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include <vector>

#include <fmt/format.h>
#include <sycl/sycl.hpp>

#include "buffer.h"
#include "cgf_diagnostics.h"
#include "item.h"
#include "partition.h"
#include "range_mapper.h"
#include "ranges.h"
#include "sycl_wrappers.h"
#include "task.h"
#include "types.h"
#include "version.h"
#include "workaround.h"

namespace celerity {
class handler;
}

namespace celerity::experimental {

/**
 * Constrains the granularity at which a task's global range can be split into chunks.
 *
 * In some situations an output buffer access is only guaranteed to write to non-overlapping subranges
 * if the task is split in a certain way. For example, when computing the row-wise sum of a 2D matrix into
 * a 1D vector, a split constraint is required to ensure that each element of the vector is written by
 * exactly one chunk.
 *
 * Another use case is performance optimization, for example when creating many small chunks
 * would result in hardware under-utilization and excessive data transfers.
 *
 * Since ND-range parallel_for kernels are already constrained to be split with group size granularity,
 * adding an additional constraint on top results in an effective constraint of LCM(group size, constraint).
 *
 * The constraint (or effective constraint) must evenly divide the global range.
 * This function has no effect when called for a task without a user-provided global range.
 */
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint);
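
/*
 * Usage sketch (editor's illustration, not part of the original header): constraining the split of a
 * 1D kernel so that chunk boundaries fall only at multiples of 256 items, e.g. to keep whole rows of a
 * row-major matrix within a single chunk. `q` (a celerity::distr_queue) and the accessor setup are
 * assumed from the wider Celerity API and are placeholders here.
 * ```c++
 * q.submit([&](celerity::handler& cgh) {
 *     // ... accessor declarations ...
 *     celerity::experimental::constrain_split(cgh, celerity::range<1>{256});
 *     cgh.parallel_for(celerity::range<1>{4096}, [=](celerity::item<1> item) {
 *         // ... kernel body; 4096 is evenly divided by the constraint, as required above ...
 *     });
 * });
 * ```
 */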

class collective_group;

} // namespace celerity::experimental

namespace celerity {

namespace detail {
        class task_manager;

        handler make_command_group_handler(const task_id tid, const size_t num_collective_nodes);
        std::unique_ptr<task> into_task(handler&& cgh);
        hydration_id add_requirement(handler& cgh, const buffer_id bid, const access_mode mode, std::unique_ptr<range_mapper_base> rm);
        void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
        void add_reduction(handler& cgh, const reduction_info& rinfo);

        void set_task_name(handler& cgh, const std::string& debug_name);

        struct unnamed_kernel {};

        template <typename KernelName>
        constexpr bool is_unnamed_kernel = std::is_same_v<KernelName, unnamed_kernel>;

        template <typename KernelName>
        std::string kernel_debug_name() {
                return !is_unnamed_kernel<KernelName> ? utils::get_simplified_type_name<KernelName>() : std::string{};
        }

        struct simple_kernel_flavor {};
        struct nd_range_kernel_flavor {};

        template <typename Flavor, int Dims>
        struct kernel_flavor_traits;

        struct no_local_size {};

        template <int Dims>
        struct kernel_flavor_traits<simple_kernel_flavor, Dims> {
                inline static constexpr bool has_local_size = false;
                using local_size_type = no_local_size;
        };

        template <int Dims>
        struct kernel_flavor_traits<nd_range_kernel_flavor, Dims> {
                inline static constexpr bool has_local_size = true;
                using local_size_type = range<Dims>;
        };

        class collective_tag_factory;
} // namespace detail

namespace experimental {
        /**
         * Each collective host task is executed within a collective group. If multiple host tasks are scheduled within the same collective group, they are
         * guaranteed to execute in the same order on every node and within a single thread per node. Each group has its own MPI communicator spanning all
         * participating nodes, so MPI operations the user invokes from different collective groups do not race.
         */
        class collective_group {
          public:
                /// Creates a new collective group with a globally unique id. This must only be called from the main thread.
                collective_group() noexcept : m_cgid(s_next_cgid++) {}

          private:
                friend class detail::collective_tag_factory;
                detail::collective_group_id m_cgid;
                inline static detail::collective_group_id s_next_cgid = detail::root_collective_group_id + 1;
        };
} // namespace experimental
117

118
namespace detail {
119
        /**
120
         * The collective group used in collective host tasks when no group is specified explicitly.
121
         */
122
        inline const experimental::collective_group default_collective_group;
123

124
        /**
125
         * Tag type marking a `handler::host_task` as a collective task. Do not construct this type directly, but use `celerity::experimental::collective`
126
         * or `celerity::experimental::collective(group)`.
127
         */
128
        class collective_tag {
129
          private:
130
                friend class collective_tag_factory;
131
                friend class celerity::handler;
132
                collective_tag(collective_group_id cgid) : m_cgid(cgid) {}
61✔
133
                collective_group_id m_cgid;
134
        };
135

136
        /**
137
         * Tag type construction helper. Do not construct this type directly, use `celerity::experimental::collective` instead.
138
         */
139
        class collective_tag_factory {
140
          public:
141
                operator collective_tag() const { return default_collective_group.m_cgid; }
28✔
142
                collective_tag operator()(experimental::collective_group cg) const { return cg.m_cgid; }
33✔
143
        };
144
} // namespace detail
145

146
namespace experimental {
147
        /**
148
         * Pass to `handler::host_task` to select the collective host task overload.
149
         *
150
         * Either as a value to schedule with the `default_collective_group`:
151
         * ```c++
152
         * cgh.host_task(celerity::experimental::collective, []...);
153
         * ```
154
         *
155
         * Or by specifying a collective group explicitly:
156
         * ```c++
157
         * celerity::experimental::collective_group my_group;
158
         * ...
159
         * cgh.host_task(celerity::experimental::collective(my_group), []...);
160
         * ```
161
         */
162
        inline constexpr detail::collective_tag_factory collective;
163
} // namespace experimental
164

165
namespace detail {
166

167
        /// Tag type marking a `handler::host_task` as a master-node task. Do not construct this type directly, but use `celerity::on_master_node`.
168
        class on_master_node_tag {};
169

170
        /// Tag type marking a `handler::host_task` as a single-item host task equivalent to `range<0>{}`. Do not construct this type directly, but use
171
        /// `celerity::once`.
172
        class once_tag {};
173

174
        template <typename Kernel, int Dims, typename... Reducers>
175
        inline void invoke_kernel(const Kernel& kernel, const sycl::id<std::max(1, Dims)>& s_id, const range<Dims>& global_range, const id<Dims>& global_offset,
16,804,144✔
176
            const id<Dims>& chunk_offset, Reducers&... reducers) {
177
                kernel(make_item<Dims>(id_cast<Dims>(id<std::max(1, Dims)>(s_id)) + chunk_offset, global_offset, global_range), reducers...);
16,804,144✔
178
        }
16,804,144✔
179

180
        template <typename Kernel, int Dims, typename... Reducers>
181
        inline void invoke_kernel(const Kernel& kernel, const sycl::nd_item<std::max(1, Dims)>& s_item, const range<Dims>& global_range,
25,330✔
182
            const id<Dims>& global_offset, const id<Dims>& chunk_offset, const range<Dims>& group_range, const id<Dims>& group_offset, Reducers&... reducers) {
183
                kernel(make_nd_item<Dims>(s_item, global_range, global_offset, chunk_offset, group_range, group_offset), reducers...);
25,330✔
184
        }
25,330✔
185

186
        template <typename Kernel, int Dims>
187
        auto bind_simple_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims>& chunk_offset) {
306✔
188
                return [=](auto s_item_or_id, auto&... reducers) {
16,804,144✔
189
                        static_assert(std::is_invocable_v<Kernel, celerity::item<Dims>, decltype(reducers)...>,
190
                            "Kernel function must be invocable with celerity::item<Dims> and as many reducer objects as reductions passed to parallel_for");
191
                        if constexpr(CELERITY_WORKAROUND(DPCPP) && std::is_same_v<sycl::id<Dims>, decltype(s_item_or_id)>) {
192
                                // CELERITY_WORKAROUND_LESS_OR_EQUAL: DPC++ passes a sycl::id instead of a sycl::item to kernels alongside reductions
193
                                invoke_kernel(kernel, s_item_or_id, global_range, global_offset, chunk_offset, reducers...);
194
                        } else {
195
                                invoke_kernel(kernel, s_item_or_id.get_id(), global_range, global_offset, chunk_offset, reducers...);
16,804,144✔
196
                        }
197
                };
306✔
198
        }

        template <typename Kernel, int Dims>
        auto bind_nd_range_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims> chunk_offset,
            const range<Dims>& group_range, const id<Dims>& group_offset) {
                return [=](sycl::nd_item<std::max(1, Dims)> s_item, auto&... reducers) {
                        static_assert(std::is_invocable_v<Kernel, celerity::nd_item<Dims>, decltype(reducers)...>,
                            "Kernel function must be invocable with celerity::nd_item<Dims> and as many reducer objects as reductions passed to parallel_for");
                        invoke_kernel(kernel, s_item, global_range, global_offset, chunk_offset, group_range, group_offset, reducers...);
                };
        }

        template <typename KernelName, typename... Params>
        inline void invoke_sycl_parallel_for(sycl::handler& cgh, Params&&... args) {
                static_assert(CELERITY_FEATURE_UNNAMED_KERNELS || !is_unnamed_kernel<KernelName>,
                    "Your SYCL implementation does not support unnamed kernels, add a kernel name template parameter to this parallel_for invocation");
                if constexpr(detail::is_unnamed_kernel<KernelName>) {
#if CELERITY_FEATURE_UNNAMED_KERNELS // see static_assert above
                        cgh.parallel_for(std::forward<Params>(args)...);
#endif
                } else {
                        cgh.parallel_for<KernelName>(std::forward<Params>(args)...);
                }
        }

        template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
        class reduction_descriptor;

        template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
        auto make_sycl_reduction(const reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>& d, void* ptr) {
                if constexpr(WithExplicitIdentity) {
                        return sycl::reduction(static_cast<DataT*>(ptr), d.m_identity, d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
                } else {
                        return sycl::reduction(static_cast<DataT*>(ptr), d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
                }
        }

        template <typename DataT, int Dims, typename BinaryOperation>
        class reduction_descriptor<DataT, Dims, BinaryOperation, false /* WithExplicitIdentity */> {
          public:
                reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT /* identity */, bool include_current_buffer_value)
                    : m_bid(bid), m_op(combiner), m_include_current_buffer_value(include_current_buffer_value) {}

          private:
                friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, false>(const reduction_descriptor&, void*);

                buffer_id m_bid;
                BinaryOperation m_op;
                bool m_include_current_buffer_value;
        };

        template <typename DataT, int Dims, typename BinaryOperation>
        class reduction_descriptor<DataT, Dims, BinaryOperation, true /* WithExplicitIdentity */> {
          public:
                reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT identity, bool include_current_buffer_value)
                    : m_bid(bid), m_op(combiner), m_identity(identity), m_include_current_buffer_value(include_current_buffer_value) {}

          private:
                friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, true>(const reduction_descriptor&, void*);

                buffer_id m_bid;
                BinaryOperation m_op;
                DataT m_identity{};
                bool m_include_current_buffer_value;
        };

        template <bool WithExplicitIdentity, typename DataT, int Dims, typename BinaryOperation>
        auto make_reduction(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation op, DataT identity, const sycl::property_list& prop_list) {
                if(vars.get_range().size() != 1) {
                        // Like SYCL 2020, Celerity only supports reductions to unit-sized buffers. This allows us to avoid tracking different parts of the buffer
                        // as distributed_state and pending_reduction_state.
                        throw std::runtime_error("Only unit-sized buffers can be reduction targets");
                }

                const auto bid = detail::get_buffer_id(vars);
                const auto include_current_buffer_value = !prop_list.has_property<celerity::property::reduction::initialize_to_identity>();

                const auto rid = detail::runtime::get_instance().create_reduction(detail::make_reducer(op, identity));
                add_reduction(cgh, reduction_info{rid, bid, include_current_buffer_value});

                return detail::reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>{bid, op, identity, include_current_buffer_value};
        }

        template <typename... ReductionsAndKernel>
        struct is_reductions_and_kernel_object : std::false_type {};

        template <typename Kernel>
        struct is_reductions_and_kernel_object<Kernel> : std::true_type {};

        template <bool WithExplicitIdentity, typename DataT, int Dims, typename BinaryOperation, typename... ReductionsAndKernel>
        struct is_reductions_and_kernel_object<reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>, ReductionsAndKernel...>
            : is_reductions_and_kernel_object<ReductionsAndKernel...> {};

        template <typename... KernelAndReductions>
        constexpr bool is_reductions_and_kernel_v = is_reductions_and_kernel_object<std::remove_cv_t<std::remove_reference_t<KernelAndReductions>>...>::value;

} // namespace detail

/// Pass to `handler::host_task` to select the master-node task overload.
inline constexpr detail::on_master_node_tag on_master_node;

/// Equivalent to `range<0>{}` when passed to `handler::host_task`.
inline constexpr detail::once_tag once;

class handler {
  public:
        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel,
            std::enable_if_t<detail::is_reductions_and_kernel_v<ReductionsAndKernel...>, int> = 0>
        void parallel_for(range<Dims> global_range, ReductionsAndKernel&&... reductions_and_kernel) {
                parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, id<Dims>(),
                    detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel,
            std::enable_if_t<detail::is_reductions_and_kernel_v<ReductionsAndKernel...>, int> = 0>
        void parallel_for(range<Dims> global_range, id<Dims> global_offset, ReductionsAndKernel&&... reductions_and_kernel) {
                parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, global_offset,
                    detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, typename... ReductionsAndKernel,
            std::enable_if_t<detail::is_reductions_and_kernel_v<ReductionsAndKernel...>, int> = 0>
        void parallel_for(const size_t global_range, ReductionsAndKernel&&... reductions_and_kernel) {
                parallel_for<KernelName>(range<1>(global_range), std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, typename... ReductionsAndKernel,
            std::enable_if_t<detail::is_reductions_and_kernel_v<ReductionsAndKernel...>, int> = 0>
        void parallel_for(const size_t global_range, const size_t global_offset, ReductionsAndKernel&&... reductions_and_kernel) {
                parallel_for<KernelName>(range<1>(global_range), id<1>(global_offset), std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel,
            std::enable_if_t<detail::is_reductions_and_kernel_v<ReductionsAndKernel...>, int> = 0>
        void parallel_for(celerity::nd_range<Dims> execution_range, ReductionsAndKernel&&... reductions_and_kernel) {
                parallel_for_reductions_and_kernel<detail::nd_range_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(execution_range.get_global_range(),
                    execution_range.get_offset(), execution_range.get_local_range(), std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }
339

340
        /**
341
         * Schedules `task` to execute on the master node only. Call via `cgh.host_task(celerity::on_master_node, []...)`. The task functor is assumed to be
342
         * invocable with the signature `void(const celerity::partition<0> &)` or `void()`.
343
         *
344
         * The task is executed in a background thread and multiple master node tasks may be executed concurrently if they are independent in the task graph, so
345
         * proper synchronization must be ensured.
346
         */
347
        template <typename Functor>
348
        void host_task(const detail::on_master_node_tag on_master_node, Functor&& task) {
1,266✔
349
                auto launcher = make_host_task_launcher<0, false>(detail::zeros, detail::non_collective_group_id, std::forward<Functor>(task));
1,266✔
350
                create_master_node_task(std::move(launcher));
1,263✔
351
        }
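
        /*
         * Usage sketch (editor's illustration, not part of the original header); `q` is an assumed
         * celerity::distr_queue.
         * ```c++
         * q.submit([&](celerity::handler& cgh) {
         *     cgh.host_task(celerity::on_master_node, [] {
         *         // Runs on the master node only, in a background thread; independent master-node
         *         // tasks may overlap, so shared state needs its own synchronization.
         *         fmt::print("step finished\n");
         *     });
         * });
         * ```
         */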
        /**
         * Schedules `task` to be executed collectively on all nodes participating in the specified collective group. Call via
         * `cgh.host_task(celerity::experimental::collective, []...)` or `cgh.host_task(celerity::experimental::collective(group), []...)`.
         * The task functor is assumed to be invocable with the signature `void(const celerity::experimental::collective_partition&)`
         * or `void(const celerity::partition<1>&)`.
         *
         * This provides a framework for using arbitrary collective MPI operations in a host task, such as performing collective I/O with parallel HDF5.
         * The local node id, the number of participating nodes, as well as the group MPI communicator can be obtained from the `collective_partition` passed into
         * the task functor.
         *
         * All collective tasks within a collective group are guaranteed to be executed in the same order on all nodes.
         */
        template <typename Functor>
        void host_task(const detail::collective_tag collective, Functor&& task) {
                // FIXME: We should not have to know how the global range is determined for collective tasks to create the launcher
                auto launcher = make_host_task_launcher<1, true>(range<3>{m_num_collective_nodes, 1, 1}, collective.m_cgid, std::forward<Functor>(task));
                create_collective_task(collective.m_cgid, std::move(launcher));
        }
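
        /*
         * Usage sketch (editor's illustration, not part of the original header): a collective host task on
         * the default collective group. `q` is an assumed celerity::distr_queue; the collective_partition
         * exposes the local node id, the number of nodes and the group's MPI communicator (see partition.h),
         * which can be handed to MPI-IO or parallel HDF5.
         * ```c++
         * q.submit([&](celerity::handler& cgh) {
         *     cgh.host_task(celerity::experimental::collective,
         *         [](const celerity::experimental::collective_partition& part) {
         *             // ... collective MPI calls using the communicator obtained from `part` ...
         *         });
         * });
         * ```
         */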

        /**
         * Schedules a distributed execution of `task` by splitting the iteration space in a runtime-defined manner. The task functor is assumed to be invocable
         * with the signature `void(const celerity::partition<Dims>&)`.
         *
         * The task is executed in a background thread with multiple host tasks being run concurrently if they are independent in the task graph, so proper
         * synchronization must be ensured. The partition passed into the task functor describes the split each host receives. It may be used with accessors to
         * obtain the per-node portion of a buffer en bloc, see `celerity::accessor::get_allocation_window` for details.
         *
         * There are no guarantees with respect to the split size and the order in which host tasks are executed between nodes other than
         * the restrictions imposed by dependencies in the task graph. Also, the task functor may be invoked multiple times on one node and not be scheduled on
         * another node. If you need guarantees about execution order, consider `host_task(experimental::collective)` instead.
         */
        template <int Dims, typename Functor>
        void host_task(range<Dims> global_range, id<Dims> global_offset, Functor&& task) {
                const detail::task_geometry geometry{
                    Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset), get_constrained_granularity(global_range, range<Dims>(detail::ones))};
                auto launcher = make_host_task_launcher<Dims, false>(detail::range_cast<3>(global_range), 0, std::forward<Functor>(task));
                create_host_compute_task(geometry, std::move(launcher));
        }
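
        /*
         * Usage sketch (editor's illustration, not part of the original header); `q` is an assumed
         * celerity::distr_queue. Each node receives a runtime-chosen portion of the 128-item range,
         * described by the partition passed to the functor.
         * ```c++
         * q.submit([&](celerity::handler& cgh) {
         *     cgh.host_task(celerity::range<1>{128}, [=](const celerity::partition<1>& part) {
         *         // ... process the items of this node's subrange (see partition.h) ...
         *     });
         * });
         * ```
         */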

        /**
         * Like `host_task(range<Dims> global_range, id<Dims> global_offset, Functor task)`, but with a `global_offset` of zero.
         */
        template <int Dims, typename Functor>
        void host_task(range<Dims> global_range, Functor&& task) {
                host_task(global_range, {}, std::forward<Functor>(task));
        }

        /**
         * Schedules a host task with a single-element iteration space, causing it to be executed exactly once and on a single cluster node.
         * Equivalent to `host_task(range<0>{}, ...)`.
         */
        template <typename Functor>
        void host_task(const detail::once_tag once, Functor&& task) {
                host_task(range<0>{}, std::forward<Functor>(task));
        }
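
        /*
         * Usage sketch (editor's illustration, not part of the original header); `q` is an assumed
         * celerity::distr_queue.
         * ```c++
         * q.submit([&](celerity::handler& cgh) {
         *     cgh.host_task(celerity::once, [] {
         *         // Executed exactly once, on a single runtime-chosen node.
         *     });
         * });
         * ```
         */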

  private:
        friend handler detail::make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes);
        friend std::unique_ptr<detail::task> detail::into_task(handler&& cgh);
        friend detail::hydration_id detail::add_requirement(
            handler& cgh, const detail::buffer_id bid, const access_mode mode, std::unique_ptr<detail::range_mapper_base> rm);
        friend void detail::add_requirement(handler& cgh, const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
        friend void detail::add_reduction(handler& cgh, const detail::reduction_info& rinfo);
        template <int Dims>
        friend void experimental::constrain_split(handler& cgh, const range<Dims>& constraint);
        template <typename Hint>
        friend void experimental::hint(handler& cgh, Hint&& hint);
        friend void detail::set_task_name(handler& cgh, const std::string& debug_name);

        detail::task_id m_tid;
        std::vector<detail::buffer_access> m_buffer_accesses;
        detail::side_effect_map m_side_effects;
        size_t m_non_void_side_effects_count = 0;
        detail::reduction_set m_reductions;
        std::unique_ptr<detail::task> m_task = nullptr;
        size_t m_num_collective_nodes;
        detail::hydration_id m_next_accessor_hydration_id = 1;
        std::optional<std::string> m_usr_def_task_name;
        range<3> m_split_constraint = detail::ones;
        std::vector<std::unique_ptr<detail::hint_base>> m_hints;

        handler(detail::task_id tid, size_t num_collective_nodes) : m_tid(tid), m_num_collective_nodes(num_collective_nodes) {}

        template <typename KernelFlavor, typename KernelName, int Dims, typename... ReductionsAndKernel, size_t... ReductionIndices>
        void parallel_for_reductions_and_kernel(range<Dims> global_range, id<Dims> global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_size, std::index_sequence<ReductionIndices...> indices,
            ReductionsAndKernel&&... kernel_and_reductions) {
                auto args_tuple = std::forward_as_tuple(kernel_and_reductions...);
                auto&& kernel = std::get<sizeof...(kernel_and_reductions) - 1>(args_tuple);
                parallel_for_kernel_and_reductions<KernelFlavor, KernelName>(
                    global_range, global_offset, local_size, std::forward<decltype(kernel)>(kernel), std::get<ReductionIndices>(args_tuple)...);
        }

        template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, typename... Reductions>
        void parallel_for_kernel_and_reductions(range<Dims> global_range, id<Dims> global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel, Reductions&... reductions) {
                range<3> granularity = {1, 1, 1};
                if constexpr(detail::kernel_flavor_traits<KernelFlavor, Dims>::has_local_size) {
                        for(int d = 0; d < Dims; ++d) {
                                granularity[d] = local_range[d];
                        }
                }
                const detail::task_geometry geometry{Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset),
                    get_constrained_granularity(global_range, detail::range_cast<Dims>(granularity))};
                auto launcher = make_device_kernel_launcher<KernelFlavor, KernelName, Dims>(
                    global_range, global_offset, local_range, std::forward<Kernel>(kernel), std::index_sequence_for<Reductions...>(), reductions...);
                create_device_compute_task(geometry, detail::kernel_debug_name<KernelName>(), std::move(launcher));
        }

        [[nodiscard]] detail::hydration_id add_requirement(const detail::buffer_id bid, const access_mode mode, std::unique_ptr<detail::range_mapper_base> rm) {
                assert(m_task == nullptr);
                m_buffer_accesses.push_back(detail::buffer_access{bid, mode, std::move(rm)});
                return m_next_accessor_hydration_id++;
        }

        void add_requirement(const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
                assert(m_task == nullptr);
                m_side_effects.add_side_effect(hoid, order);
                if(!is_void) { m_non_void_side_effects_count++; }
        }

        void add_reduction(const detail::reduction_info& rinfo) {
                assert(m_task == nullptr);
                m_reductions.push_back(rinfo);
        }

        template <int Dims>
        void experimental_constrain_split(const range<Dims>& constraint) {
                assert(m_task == nullptr);
                m_split_constraint = detail::range_cast<3>(constraint);
        }

        template <typename Hint>
        void experimental_hint(Hint&& hint) {
                static_assert(std::is_base_of_v<detail::hint_base, std::decay_t<Hint>>, "Hint must extend hint_base");
                static_assert(std::is_move_constructible_v<Hint>, "Hint must be move-constructible");
                for(auto& h : m_hints) {
                        // We currently don't allow more than one hint of the same type for simplicity; this could be loosened in the future.
                        auto& hr = *h; // Need to do this here to avoid -Wpotentially-evaluated-expression
                        if(typeid(hr) == typeid(hint)) { throw std::runtime_error("Providing more than one hint of the same type is not allowed"); }
                        h->validate(hint);
                }
                m_hints.emplace_back(std::make_unique<std::decay_t<Hint>>(std::forward<Hint>(hint)));
        }

        template <int Dims>
        range<3> get_constrained_granularity(const range<Dims>& global_size, const range<Dims>& granularity) const {
                range<3> result = detail::range_cast<3>(granularity);
                for(int i = 0; i < Dims; ++i) {
                        const auto lcm = std::lcm(granularity[i], m_split_constraint[i]);
                        if(lcm == 0) { throw std::runtime_error("Split constraint cannot be 0"); }
                        result[i] = lcm;
                }
                if(global_size % detail::range_cast<Dims>(result) != range<Dims>(detail::zeros)) {
                        throw std::runtime_error(fmt::format("The{}split constraint {} does not evenly divide the kernel global size {}",
                            granularity.size() > 1 ? " effective " : " ", detail::range_cast<Dims>(result), global_size));
                }
                return result;
        }

        void create_host_compute_task(const detail::task_geometry& geometry, detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                if(geometry.global_size.size() == 0) {
                        // TODO this can be easily supported by not creating a task in case the execution range is empty
                        throw std::runtime_error{"The execution range of distributed host tasks must have at least one item"};
                }
                m_task = detail::task::make_host_compute(m_tid, geometry, std::move(launcher), detail::buffer_access_map(std::move(m_buffer_accesses), geometry),
                    std::move(m_side_effects), std::move(m_reductions));

                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        void create_device_compute_task(const detail::task_geometry& geometry, const std::string& debug_name, detail::device_kernel_launcher launcher) {
                assert(m_task == nullptr);
                if(geometry.global_size.size() == 0) {
                        // TODO unless reductions are involved, this can be easily supported by not creating a task in case the execution range is empty.
                        // Edge case: If the task includes reductions that specify property::reduction::initialize_to_identity, we need to create a task that sets
                        // the buffer state to an empty pending_reduction_state in the graph_generator. This will cause a trivial reduction_command to be generated on
                        // each node that reads from the reduction output buffer, initializing it to the identity value locally.
                        throw std::runtime_error{"The execution range of device tasks must have at least one item"};
                }
                // Note that cgf_diagnostics has a similar check, but we don't catch void side effects there.
                if(!m_side_effects.empty()) { throw std::runtime_error{"Side effects cannot be used in device kernels"}; }
                m_task = detail::task::make_device_compute(
                    m_tid, geometry, std::move(launcher), detail::buffer_access_map(std::move(m_buffer_accesses), geometry), std::move(m_reductions));

                m_task->set_debug_name(m_usr_def_task_name.value_or(debug_name));
        }

        void create_collective_task(const detail::collective_group_id cgid, detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                const detail::task_geometry geometry{1, detail::range_cast<3>(range(m_num_collective_nodes)), {}, {1, 1, 1}};
                m_task = detail::task::make_collective(m_tid, geometry, cgid, m_num_collective_nodes, std::move(launcher),
                    detail::buffer_access_map(std::move(m_buffer_accesses), geometry), std::move(m_side_effects));

                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        void create_master_node_task(detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                m_task = detail::task::make_master_node(
                    m_tid, std::move(launcher), detail::buffer_access_map(std::move(m_buffer_accesses), detail::task_geometry{}), std::move(m_side_effects));
                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, size_t... ReductionIndices, typename... Reductions>
        detail::device_kernel_launcher make_device_kernel_launcher(const range<Dims>& global_range, const id<Dims>& global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel,
            std::index_sequence<ReductionIndices...> /* indices */, Reductions... reductions) {
                static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration

                // Check whether all accessors are being captured by value etc.
                // Although the diagnostics should always be available, we currently disable them for some test cases.
                if(detail::cgf_diagnostics::is_available()) { detail::cgf_diagnostics::get_instance().check<target::device>(kernel, m_buffer_accesses); }

                return [=](sycl::handler& sycl_cgh, const detail::box<3>& execution_range, const std::vector<void*>& reduction_ptrs) {
                        constexpr int sycl_dims = std::max(1, Dims);
                        if constexpr(std::is_same_v<KernelFlavor, detail::simple_kernel_flavor>) {
                                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl_global_range,
                                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                                    detail::bind_simple_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset())));
                        } else if constexpr(std::is_same_v<KernelFlavor, detail::nd_range_kernel_flavor>) {
                                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                                const auto sycl_local_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(local_range));
                                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl::nd_range{sycl_global_range, sycl_local_range},
                                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                                    detail::bind_nd_range_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset()),
                                        global_range / local_range, detail::id_cast<Dims>(execution_range.get_offset()) / local_range));
                        } else {
                                static_assert(detail::constexpr_false<KernelFlavor>);
                        }
                };
        }

        template <int Dims, bool Collective, typename Kernel>
        detail::host_task_launcher make_host_task_launcher(const range<3>& global_range, const detail::collective_group_id cgid, Kernel&& kernel) {
                static_assert(Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const partition<Dims>>,
                    "Kernel for host task must be invocable with either no arguments or a celerity::partition<Dims>");
                static_assert(!Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const experimental::collective_partition>,
                    "Kernel for collective host task must be invocable with either no arguments or a celerity::experimental::collective_partition");
                static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration
                static_assert(Dims >= 0);

                // Check whether all accessors are being captured by value etc.
                // Although the diagnostics should always be available, we currently disable them for some test cases.
                if(detail::cgf_diagnostics::is_available()) {
                        detail::cgf_diagnostics::get_instance().check<target::host_task>(kernel, m_buffer_accesses, m_non_void_side_effects_count);
                }

                return [kernel, global_range](const detail::box<3>& execution_range, const detail::communicator* collective_comm) {
                        (void)global_range;
                        (void)collective_comm;
                        if constexpr(Dims > 0) {
                                if constexpr(Collective) {
                                        static_assert(Dims == 1);
                                        assert(collective_comm != nullptr);
                                        const auto part =
                                            detail::make_collective_partition(detail::range_cast<1>(global_range), detail::box_cast<1>(execution_range), *collective_comm);
                                        kernel(part);
                                } else {
                                        const auto part = detail::make_partition<Dims>(detail::range_cast<Dims>(global_range), detail::box_cast<Dims>(execution_range));
                                        kernel(part);
                                }
                        } else if constexpr(std::is_invocable_v<Kernel, const partition<0>&>) {
                                (void)execution_range;
                                const auto part = detail::make_partition<0>(range<0>(), subrange<0>());
                                kernel(part);
                        } else {
                                (void)execution_range;
                                kernel();
                        }
                };
        }

        std::unique_ptr<detail::task> into_task() && {
                assert(m_task != nullptr);
                for(auto& h : m_hints) {
                        m_task->add_hint(std::move(h));
                }
                return std::move(m_task);
        }
};

namespace detail {

        inline handler make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes) { return handler(tid, num_collective_nodes); }

        inline std::unique_ptr<detail::task> into_task(handler&& cgh) { return std::move(cgh).into_task(); }

        template <typename CGF>
        std::unique_ptr<task> invoke_command_group_function(const task_id tid, size_t num_collective_nodes, CGF&& cgf) {
                handler cgh = make_command_group_handler(tid, num_collective_nodes);
                std::invoke(std::forward<CGF>(cgf), cgh);
                return into_task(std::move(cgh));
        }

        [[nodiscard]] inline hydration_id add_requirement(handler& cgh, const buffer_id bid, const access_mode mode, std::unique_ptr<range_mapper_base> rm) {
                return cgh.add_requirement(bid, mode, std::move(rm));
        }

        inline void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
                return cgh.add_requirement(hoid, order, is_void);
        }

        inline void add_reduction(handler& cgh, const detail::reduction_info& rinfo) { return cgh.add_reduction(rinfo); }

        inline void set_task_name(handler& cgh, const std::string& debug_name) { cgh.m_usr_def_task_name = {debug_name}; }

        // TODO: The _impl functions in detail only exist during the grace period for deprecated reductions on const buffers; move outside again afterwards.
        template <typename DataT, int Dims, typename BinaryOperation>
        auto reduction_impl(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
                static_assert(sycl::has_known_identity_v<BinaryOperation, DataT>,
                    "Celerity does not currently support reductions without an identity. Either specialize "
                    "sycl::known_identity or use the reduction() overload taking an identity at runtime");
                return detail::make_reduction<false>(vars, cgh, combiner, sycl::known_identity_v<BinaryOperation, DataT>, prop_list);
        }

        template <typename DataT, int Dims, typename BinaryOperation>
        auto reduction_impl(
            const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
                static_assert(!sycl::has_known_identity_v<BinaryOperation, DataT>, "Identity is known to SYCL, remove the identity parameter from reduction()");
                return detail::make_reduction<true>(vars, cgh, combiner, identity, prop_list);
        }

} // namespace detail

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}
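
/*
 * Usage sketch (editor's illustration, not part of the original header): reducing 4096 values into a
 * unit-sized buffer, as required by make_reduction() above. `q` (a celerity::distr_queue), `data` and
 * the accessor setup are assumed from the wider Celerity API; the reduction object is passed to
 * parallel_for ahead of the kernel, which then receives one reducer argument per reduction.
 * ```c++
 * celerity::buffer<float, 1> sum_buf{{1}}; // reduction targets must have exactly one element
 *
 * q.submit([&](celerity::handler& cgh) {
 *     celerity::accessor in{data, cgh, celerity::access::one_to_one{}, celerity::read_only};
 *     auto sum = celerity::reduction(sum_buf, cgh, sycl::plus<float>{});
 *     cgh.parallel_for(celerity::range<1>{4096}, sum, [=](celerity::item<1> item, auto& s) {
 *         s += in[item];
 *     });
 * });
 * // For combiners without a sycl::known_identity, use the overload taking an explicit identity value.
 * ```
 */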

template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}

} // namespace celerity

namespace celerity::experimental {
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint) {
        cgh.experimental_constrain_split(constraint);
}

template <typename Hint>
void hint(handler& cgh, Hint&& hint) {
        cgh.experimental_hint(std::forward<Hint>(hint));
}
} // namespace celerity::experimental