celerity / celerity-runtime, build 10281467154 (push via github; fknorr: "hipSYCL is now AdaptiveCpp")
07 Aug 2024 09:09AM UTC. Coverage: 94.936% (-0.01%) from 94.947%.
Branches: 2977 of 3372 covered (88.29%). Branch coverage is included in the aggregate percentage.
Lines: 6547 of 6660 relevant lines covered (98.3%), 1532783.7 hits per line.

Source file: /include/handler.h (97.02% covered)
#pragma once

#include <memory>
#include <type_traits>
#include <typeinfo>
#include <utility>

#include <fmt/format.h>
#include <sycl/sycl.hpp>

#include "buffer.h"
#include "cgf_diagnostics.h"
#include "item.h"
#include "partition.h"
#include "range_mapper.h"
#include "ranges.h"
#include "task.h"
#include "types.h"
#include "workaround.h"

namespace celerity {
class handler;
}

namespace celerity::experimental {

/**
 * Constrains the granularity at which a task's global range can be split into chunks.
 *
 * In some situations an output buffer access is only guaranteed to write to non-overlapping subranges
 * if the task is split in a certain way. For example when computing the row-wise sum of a 2D matrix into
 * a 1D vector, a split constraint is required to ensure that each element of the vector is written by
 * exactly one chunk.
 *
 * Another use case is for performance optimization, for example when the creation of lots of small chunks
 * would result in hardware under-utilization and excessive data transfers.
 *
 * Since ND-range parallel_for kernels are already constrained to be split with group size granularity,
 * adding an additional constraint on top results in an effective constraint of LCM(group size, constraint).
 *
 * The constraint (or effective constraint) must evenly divide the global range.
 * This function has no effect when called for a task without a user-provided global range.
 */
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint);
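// Usage sketch for constrain_split (illustrative only; the accessor setup and kernel body are
// placeholders, not part of this header). For the row-wise sum example above, constraining the
// split to whole rows of a 1024x256 matrix guarantees that each output element is written by
// exactly one chunk:
//
//     celerity::experimental::constrain_split(cgh, celerity::range<2>(1, 256));
//     cgh.parallel_for(celerity::range<2>(1024, 256), [=](celerity::item<2> item) {
//         // accumulate item into the row sum for row item[0] (hypothetical accessor)
//     });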

} // namespace celerity::experimental

namespace celerity {

namespace detail {
        class task_manager;

        handler make_command_group_handler(const task_id tid, const size_t num_collective_nodes);
        std::unique_ptr<task> into_task(handler&& cgh);
        hydration_id add_requirement(handler& cgh, const buffer_id bid, std::unique_ptr<range_mapper_base> rm);
        void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
        void add_reduction(handler& cgh, const reduction_info& rinfo);

        void set_task_name(handler& cgh, const std::string& debug_name);

        struct unnamed_kernel {};

        template <typename KernelName>
        constexpr bool is_unnamed_kernel = std::is_same_v<KernelName, unnamed_kernel>;

        template <typename KernelName>
        std::string kernel_debug_name() {
                return !is_unnamed_kernel<KernelName> ? utils::get_simplified_type_name<KernelName>() : std::string{};
        }

        struct simple_kernel_flavor {};
        struct nd_range_kernel_flavor {};

        template <typename Flavor, int Dims>
        struct kernel_flavor_traits;

        struct no_local_size {};

        template <int Dims>
        struct kernel_flavor_traits<simple_kernel_flavor, Dims> {
                inline static constexpr bool has_local_size = false;
                using local_size_type = no_local_size;
        };

        template <int Dims>
        struct kernel_flavor_traits<nd_range_kernel_flavor, Dims> {
                inline static constexpr bool has_local_size = true;
                using local_size_type = range<Dims>;
        };
} // namespace detail

/**
 * Tag type marking a `handler::host_task` as a master-node task. Do not construct this type directly, but use `celerity::on_master_node`.
 */
class on_master_node_tag {};

/**
 * Pass to `handler::host_task` to select the master-node task overload.
 */
inline constexpr on_master_node_tag on_master_node;

namespace experimental {
        class collective_tag_factory;

        /**
         * Each collective host task is executed within a collective group. If multiple host tasks are scheduled within the same collective group, they are
         * guaranteed to execute in the same order on every node and within a single thread per node. Each group has its own MPI communicator spanning all
         * participating nodes, so MPI operations the user invokes from different collective groups do not race.
         */
        class collective_group {
          public:
                /// Creates a new collective group with a globally unique id. This must only be called from the main thread.
                collective_group() noexcept : m_cgid(s_next_cgid++) {}

          private:
                friend class collective_tag_factory;
                detail::collective_group_id m_cgid;
                inline static detail::collective_group_id s_next_cgid = detail::root_collective_group_id + 1;
        };

        /**
         * Tag type marking a `handler::host_task` as a collective task. Do not construct this type directly, but use `celerity::experimental::collective`
         * or `celerity::experimental::collective(group)`.
         */
        class collective_tag {
          private:
                friend class collective_tag_factory;
                friend class celerity::handler;
                collective_tag(detail::collective_group_id cgid) : m_cgid(cgid) {}
                detail::collective_group_id m_cgid;
        };

        /**
         * The collective group used in collective host tasks when no group is specified explicitly.
         */
        inline const collective_group default_collective_group;

        /**
         * Tag type construction helper. Do not construct this type directly, use `celerity::experimental::collective` instead.
         */
        class collective_tag_factory {
          public:
                operator experimental::collective_tag() const { return default_collective_group.m_cgid; }
                experimental::collective_tag operator()(experimental::collective_group cg) const { return cg.m_cgid; }
        };

        /**
         * Pass to `handler::host_task` to select the collective host task overload.
         *
         * Either as a value to schedule with the `default_collective_group`:
         * ```c++
         * cgh.host_task(celerity::experimental::collective, []...);
         * ```
         *
         * Or by specifying a collective group explicitly:
         * ```c++
         * celerity::experimental::collective_group my_group;
         * ...
         * cgh.host_task(celerity::experimental::collective(my_group), []...);
         * ```
         */
        inline constexpr collective_tag_factory collective;
} // namespace experimental

namespace detail {
        template <typename Kernel, int Dims, typename... Reducers>
        inline void invoke_kernel(const Kernel& kernel, const sycl::id<std::max(1, Dims)>& s_id, const range<Dims>& global_range, const id<Dims>& global_offset,
            const id<Dims>& chunk_offset, Reducers&... reducers) {
                kernel(make_item<Dims>(id_cast<Dims>(id<std::max(1, Dims)>(s_id)) + chunk_offset, global_offset, global_range), reducers...);
        }

        template <typename Kernel, int Dims, typename... Reducers>
        inline void invoke_kernel(const Kernel& kernel, const sycl::nd_item<std::max(1, Dims)>& s_item, const range<Dims>& global_range,
            const id<Dims>& global_offset, const id<Dims>& chunk_offset, const range<Dims>& group_range, const id<Dims>& group_offset, Reducers&... reducers) {
                kernel(make_nd_item<Dims>(s_item, global_range, global_offset, chunk_offset, group_range, group_offset), reducers...);
        }

        template <typename Kernel, int Dims>
        auto bind_simple_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims>& chunk_offset) {
                return [=](auto s_item_or_id, auto&... reducers) {
                        static_assert(std::is_invocable_v<Kernel, celerity::item<Dims>, decltype(reducers)...>,
                            "Kernel function must be invocable with celerity::item<Dims> and as many reducer objects as reductions passed to parallel_for");
                        if constexpr(CELERITY_WORKAROUND(DPCPP) && std::is_same_v<sycl::id<Dims>, decltype(s_item_or_id)>) {
                                // CELERITY_WORKAROUND_LESS_OR_EQUAL: DPC++ passes a sycl::id instead of a sycl::item to kernels alongside reductions
                                invoke_kernel(kernel, s_item_or_id, global_range, global_offset, chunk_offset, reducers...);
                        } else {
                                invoke_kernel(kernel, s_item_or_id.get_id(), global_range, global_offset, chunk_offset, reducers...);
                        }
                };
        }

        template <typename Kernel, int Dims>
        auto bind_nd_range_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims> chunk_offset,
            const range<Dims>& group_range, const id<Dims>& group_offset) {
                return [=](sycl::nd_item<std::max(1, Dims)> s_item, auto&... reducers) {
                        static_assert(std::is_invocable_v<Kernel, celerity::nd_item<Dims>, decltype(reducers)...>,
                            "Kernel function must be invocable with celerity::nd_item<Dims> and as many reducer objects as reductions passed to parallel_for");
                        invoke_kernel(kernel, s_item, global_range, global_offset, chunk_offset, group_range, group_offset, reducers...);
                };
        }

        template <typename KernelName, typename... Params>
        inline void invoke_sycl_parallel_for(sycl::handler& cgh, Params&&... args) {
                static_assert(CELERITY_FEATURE_UNNAMED_KERNELS || !is_unnamed_kernel<KernelName>,
                    "Your SYCL implementation does not support unnamed kernels, add a kernel name template parameter to this parallel_for invocation");
                if constexpr(detail::is_unnamed_kernel<KernelName>) {
#if CELERITY_FEATURE_UNNAMED_KERNELS // see static_assert above
                        cgh.parallel_for(std::forward<Params>(args)...);
#endif
                } else {
                        cgh.parallel_for<KernelName>(std::forward<Params>(args)...);
                }
        }

        template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
        class reduction_descriptor;

        template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
        auto make_sycl_reduction(const reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>& d, void* ptr) {
                if constexpr(WithExplicitIdentity) {
                        return sycl::reduction(static_cast<DataT*>(ptr), d.m_identity, d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
                } else {
                        return sycl::reduction(static_cast<DataT*>(ptr), d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
                }
        }

        template <typename DataT, int Dims, typename BinaryOperation>
        class reduction_descriptor<DataT, Dims, BinaryOperation, false /* WithExplicitIdentity */> {
          public:
                reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT /* identity */, bool include_current_buffer_value)
                    : m_bid(bid), m_op(combiner), m_include_current_buffer_value(include_current_buffer_value) {}

          private:
                friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, false>(const reduction_descriptor&, void*);

                buffer_id m_bid;
                BinaryOperation m_op;
                bool m_include_current_buffer_value;
        };

        template <typename DataT, int Dims, typename BinaryOperation>
        class reduction_descriptor<DataT, Dims, BinaryOperation, true /* WithExplicitIdentity */> {
          public:
                reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT identity, bool include_current_buffer_value)
                    : m_bid(bid), m_op(combiner), m_identity(identity), m_include_current_buffer_value(include_current_buffer_value) {}

          private:
                friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, true>(const reduction_descriptor&, void*);

                buffer_id m_bid;
                BinaryOperation m_op;
                DataT m_identity{};
                bool m_include_current_buffer_value;
        };

        template <bool WithExplicitIdentity, typename DataT, int Dims, typename BinaryOperation>
        auto make_reduction(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation op, DataT identity, const sycl::property_list& prop_list) {
                if(vars.get_range().size() != 1) {
                        // Like SYCL 2020, Celerity only supports reductions to unit-sized buffers. This allows us to avoid tracking different parts of the buffer
                        // as distributed_state and pending_reduction_state.
                        throw std::runtime_error("Only unit-sized buffers can be reduction targets");
                }

                const auto bid = detail::get_buffer_id(vars);
                const auto include_current_buffer_value = !prop_list.has_property<celerity::property::reduction::initialize_to_identity>();

                const auto rid = detail::runtime::get_instance().create_reduction(detail::make_reducer(op, identity));
                add_reduction(cgh, reduction_info{rid, bid, include_current_buffer_value});

                return detail::reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>{bid, op, identity, include_current_buffer_value};
        }

} // namespace detail

class handler {
  public:
        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
        void parallel_for(range<Dims> global_range, ReductionsAndKernel&&... reductions_and_kernel) {
                static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
                parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, id<Dims>(),
                    detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
        void parallel_for(range<Dims> global_range, id<Dims> global_offset, ReductionsAndKernel&&... reductions_and_kernel) {
                static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
                parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, global_offset,
                    detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
        void parallel_for(celerity::nd_range<Dims> execution_range, ReductionsAndKernel&&... reductions_and_kernel) {
                static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
                parallel_for_reductions_and_kernel<detail::nd_range_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(execution_range.get_global_range(),
                    execution_range.get_offset(), execution_range.get_local_range(), std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
                    std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
        }

        /**
         * Schedules `kernel` to execute on the master node only. Call via `cgh.host_task(celerity::on_master_node, []...)`. The kernel is assumed to be invocable
         * with the signature `void(const celerity::partition<0> &)` or `void()`.
         *
         * The kernel is executed in a background thread pool and multiple master node tasks may be executed concurrently if they are independent in the
         * task graph, so proper synchronization must be ensured.
         *
         * **Compatibility note:** This replaces master-access tasks from Celerity 0.1 which were executed on the master node's main thread, so this implementation
         * may require different lifetimes for captures. See `celerity::allow_by_ref` for more information on this topic.
         */
        template <typename Functor>
        void host_task(on_master_node_tag /* tag */, Functor&& kernel) {
                auto launcher = make_host_task_launcher<0, false>(detail::zeros, 0, std::forward<Functor>(kernel));
                create_master_node_task(std::move(launcher));
        }
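
        // Usage sketch for the master-node overload above (illustrative; `result_acc` is a
        // hypothetical host accessor captured by value, not part of this header):
        //
        //     cgh.host_task(celerity::on_master_node, [=] {
        //         fmt::print("final result: {}\n", result_acc[0]);
        //     });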

        /**
         * Schedules `kernel` to be executed collectively on all nodes participating in the specified collective group. Call via
         * `cgh.host_task(celerity::experimental::collective, []...)` or `cgh.host_task(celerity::experimental::collective(group), []...)`.
         * The kernel is assumed to be invocable with the signature `void(const celerity::experimental::collective_partition&)`
         * or `void(const celerity::partition<1>&)`.
         *
         * This provides a framework for using arbitrary collective MPI operations in a host task, such as performing collective I/O with parallel HDF5.
         * The local node id, the number of participating nodes, as well as the group MPI communicator can be obtained from the `collective_partition` passed into
         * the kernel.
         *
         * All collective tasks within a collective group are guaranteed to be executed in the same order on all nodes; additionally, all internal MPI operations
         * and all host kernel invocations are executed in a single thread on each host.
         */
        template <typename Functor>
        void host_task(experimental::collective_tag tag, Functor&& kernel) {
                // FIXME: We should not have to know how the global range is determined for collective tasks to create the launcher
                auto launcher = make_host_task_launcher<1, true>(range<3>{m_num_collective_nodes, 1, 1}, tag.m_cgid, std::forward<Functor>(kernel));
                create_collective_task(tag.m_cgid, std::move(launcher));
        }
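
        // Usage sketch for the collective overload above. The partition accessor name used below
        // (get_communicator) is an assumption based on the documentation, not a declaration from
        // this header; consult collective_partition for the actual interface.
        //
        //     cgh.host_task(celerity::experimental::collective, [=](const celerity::experimental::collective_partition& part) {
        //         MPI_Comm comm = part.get_communicator(); // assumed accessor name
        //         MPI_Barrier(comm);                       // any collective MPI operation on the group communicator
        //     });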

        /**
         * Schedules a distributed execution of `kernel` by splitting the iteration space in a runtime-defined manner. The kernel is assumed to be invocable
         * with the signature `void(const celerity::partition<Dims>&)`.
         *
         * The kernel is executed in a background thread pool with multiple host tasks being run concurrently if they are independent in the task graph,
         * so proper synchronization must be ensured. The partition passed into the kernel describes the split each host receives. It may be used with accessors
         * to obtain the per-node portion of a buffer en-bloc, see `celerity::accessor::get_allocation_window` for details.
         *
         * There are no guarantees with respect to the split size and the order in which host tasks are executed across nodes other than
         * the restrictions imposed by dependencies in the task graph. Also, the kernel may be invoked multiple times on one node and not be scheduled on
         * another node. If you need guarantees about execution order, consider using a collective host task instead.
         */
        template <int Dims, typename Functor>
        void host_task(range<Dims> global_range, id<Dims> global_offset, Functor&& kernel) {
                const detail::task_geometry geometry{
                    Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset), get_constrained_granularity(global_range, range<Dims>(detail::ones))};
                auto launcher = make_host_task_launcher<Dims, false>(detail::range_cast<3>(global_range), 0, std::forward<Functor>(kernel));
                create_host_compute_task(geometry, std::move(launcher));
        }

        /**
         * Like `host_task(range<Dims> global_range, id<Dims> global_offset, Functor kernel)`, but with a `global_offset` of zero.
         */
        template <int Dims, typename Functor>
        void host_task(range<Dims> global_range, Functor&& kernel) {
                host_task(global_range, {}, std::forward<Functor>(kernel));
        }
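
        // Usage sketch for the distributed host_task overloads above (illustrative; the
        // get_subrange() accessor and the loop body are assumptions for illustration, not part
        // of this header):
        //
        //     cgh.host_task(celerity::range<1>(1024), [=](const celerity::partition<1>& part) {
        //         const auto sr = part.get_subrange(); // the chunk of [0, 1024) assigned to this node
        //         for(size_t i = sr.offset[0]; i < sr.offset[0] + sr.range[0]; ++i) {
        //             // process element i, e.g. through a host accessor captured by value
        //         }
        //     });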

  private:
        friend handler detail::make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes);
        friend std::unique_ptr<detail::task> detail::into_task(handler&& cgh);
        friend detail::hydration_id detail::add_requirement(handler& cgh, const detail::buffer_id bid, std::unique_ptr<detail::range_mapper_base> rm);
        friend void detail::add_requirement(handler& cgh, const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
        friend void detail::add_reduction(handler& cgh, const detail::reduction_info& rinfo);
        template <int Dims>
        friend void experimental::constrain_split(handler& cgh, const range<Dims>& constraint);
        template <typename Hint>
        friend void experimental::hint(handler& cgh, Hint&& hint);
        friend void detail::set_task_name(handler& cgh, const std::string& debug_name);

        detail::task_id m_tid;
        detail::buffer_access_map m_access_map;
        detail::side_effect_map m_side_effects;
        size_t m_non_void_side_effects_count = 0;
        detail::reduction_set m_reductions;
        std::unique_ptr<detail::task> m_task = nullptr;
        size_t m_num_collective_nodes;
        detail::hydration_id m_next_accessor_hydration_id = 1;
        std::optional<std::string> m_usr_def_task_name;
        range<3> m_split_constraint = detail::ones;
        std::vector<std::unique_ptr<detail::hint_base>> m_hints;

        handler(detail::task_id tid, size_t num_collective_nodes) : m_tid(tid), m_num_collective_nodes(num_collective_nodes) {}

        template <typename KernelFlavor, typename KernelName, int Dims, typename... ReductionsAndKernel, size_t... ReductionIndices>
        void parallel_for_reductions_and_kernel(range<Dims> global_range, id<Dims> global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_size, std::index_sequence<ReductionIndices...> indices,
            ReductionsAndKernel&&... kernel_and_reductions) {
                auto args_tuple = std::forward_as_tuple(kernel_and_reductions...);
                auto&& kernel = std::get<sizeof...(kernel_and_reductions) - 1>(args_tuple);
                parallel_for_kernel_and_reductions<KernelFlavor, KernelName>(
                    global_range, global_offset, local_size, std::forward<decltype(kernel)>(kernel), std::get<ReductionIndices>(args_tuple)...);
        }

        template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, typename... Reductions>
        void parallel_for_kernel_and_reductions(range<Dims> global_range, id<Dims> global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel, Reductions&... reductions) {
                range<3> granularity = {1, 1, 1};
                if constexpr(detail::kernel_flavor_traits<KernelFlavor, Dims>::has_local_size) {
                        for(int d = 0; d < Dims; ++d) {
                                granularity[d] = local_range[d];
                        }
                }
                const detail::task_geometry geometry{Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset),
                    get_constrained_granularity(global_range, detail::range_cast<Dims>(granularity))};
                auto launcher = make_device_kernel_launcher<KernelFlavor, KernelName, Dims>(
                    global_range, global_offset, local_range, std::forward<Kernel>(kernel), std::index_sequence_for<Reductions...>(), reductions...);
                create_device_compute_task(geometry, detail::kernel_debug_name<KernelName>(), std::move(launcher));
        }

        [[nodiscard]] detail::hydration_id add_requirement(const detail::buffer_id bid, std::unique_ptr<detail::range_mapper_base> rm) {
                assert(m_task == nullptr);
                m_access_map.add_access(bid, std::move(rm));
                return m_next_accessor_hydration_id++;
        }

        void add_requirement(const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
                assert(m_task == nullptr);
                m_side_effects.add_side_effect(hoid, order);
                if(!is_void) { m_non_void_side_effects_count++; }
        }

        void add_reduction(const detail::reduction_info& rinfo) {
                assert(m_task == nullptr);
                m_reductions.push_back(rinfo);
        }

        template <int Dims>
        void experimental_constrain_split(const range<Dims>& constraint) {
                assert(m_task == nullptr);
                m_split_constraint = detail::range_cast<3>(constraint);
        }

        template <typename Hint>
        void experimental_hint(Hint&& hint) {
                static_assert(std::is_base_of_v<detail::hint_base, std::decay_t<Hint>>, "Hint must extend hint_base");
                static_assert(std::is_move_constructible_v<Hint>, "Hint must be move-constructible");
                for(auto& h : m_hints) {
                        // We currently don't allow more than one hint of the same type for simplicity; this could be loosened in the future.
                        auto& hr = *h; // Need to do this here to avoid -Wpotentially-evaluated-expression
                        if(typeid(hr) == typeid(hint)) { throw std::runtime_error("Providing more than one hint of the same type is not allowed"); }
                        h->validate(hint);
                }
                m_hints.emplace_back(std::make_unique<std::decay_t<Hint>>(std::forward<Hint>(hint)));
        }

        template <int Dims>
        range<3> get_constrained_granularity(const range<Dims>& global_size, const range<Dims>& granularity) const {
                range<3> result = detail::range_cast<3>(granularity);
                for(int i = 0; i < Dims; ++i) {
                        const auto lcm = std::lcm(granularity[i], m_split_constraint[i]);
                        if(lcm == 0) { throw std::runtime_error("Split constraint cannot be 0"); }
                        result[i] = lcm;
                }
                if(global_size % detail::range_cast<Dims>(result) != range<Dims>(detail::zeros)) {
                        throw std::runtime_error(fmt::format("The{}split constraint {} does not evenly divide the kernel global size {}",
                            granularity.size() > 1 ? " effective " : " ", detail::range_cast<Dims>(result), global_size));
                }
                return result;
        }

        void create_host_compute_task(const detail::task_geometry& geometry, detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                if(geometry.global_size.size() == 0) {
                        // TODO this can be easily supported by not creating a task in case the execution range is empty
                        throw std::runtime_error{"The execution range of distributed host tasks must have at least one item"};
                }
                m_task =
                    detail::task::make_host_compute(m_tid, geometry, std::move(launcher), std::move(m_access_map), std::move(m_side_effects), std::move(m_reductions));

                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        void create_device_compute_task(const detail::task_geometry& geometry, const std::string& debug_name, detail::device_kernel_launcher launcher) {
                assert(m_task == nullptr);
                if(geometry.global_size.size() == 0) {
                        // TODO unless reductions are involved, this can be easily supported by not creating a task in case the execution range is empty.
                        // Edge case: If the task includes reductions that specify property::reduction::initialize_to_identity, we need to create a task that sets
                        // the buffer state to an empty pending_reduction_state in the graph_generator. This will cause a trivial reduction_command to be generated on
                        // each node that reads from the reduction output buffer, initializing it to the identity value locally.
                        throw std::runtime_error{"The execution range of device tasks must have at least one item"};
                }
                // Note that cgf_diagnostics has a similar check, but we don't catch void side effects there.
                if(!m_side_effects.empty()) { throw std::runtime_error{"Side effects cannot be used in device kernels"}; }
                m_task = detail::task::make_device_compute(m_tid, geometry, std::move(launcher), std::move(m_access_map), std::move(m_reductions));

                m_task->set_debug_name(m_usr_def_task_name.value_or(debug_name));
        }

        void create_collective_task(const detail::collective_group_id cgid, detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                m_task = detail::task::make_collective(m_tid, cgid, m_num_collective_nodes, std::move(launcher), std::move(m_access_map), std::move(m_side_effects));

                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        void create_master_node_task(detail::host_task_launcher launcher) {
                assert(m_task == nullptr);
                m_task = detail::task::make_master_node(m_tid, std::move(launcher), std::move(m_access_map), std::move(m_side_effects));

                m_task->set_debug_name(m_usr_def_task_name.value_or(""));
        }

        template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, size_t... ReductionIndices, typename... Reductions>
        detail::device_kernel_launcher make_device_kernel_launcher(const range<Dims>& global_range, const id<Dims>& global_offset,
            typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel,
            std::index_sequence<ReductionIndices...> /* indices */, Reductions... reductions) {
                static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration

                // Check whether all accessors are being captured by value etc.
                // Although the diagnostics should always be available, we currently disable them for some test cases.
                if(detail::cgf_diagnostics::is_available()) { detail::cgf_diagnostics::get_instance().check<target::device>(kernel, m_access_map); }

                return [=](sycl::handler& sycl_cgh, const detail::box<3>& execution_range, const std::vector<void*>& reduction_ptrs) {
                        constexpr int sycl_dims = std::max(1, Dims);
                        if constexpr(std::is_same_v<KernelFlavor, detail::simple_kernel_flavor>) {
                                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl_global_range,
                                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                                    detail::bind_simple_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset())));
                        } else if constexpr(std::is_same_v<KernelFlavor, detail::nd_range_kernel_flavor>) {
                                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                                const auto sycl_local_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(local_range));
                                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl::nd_range{sycl_global_range, sycl_local_range},
                                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                                    detail::bind_nd_range_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset()),
                                        global_range / local_range, detail::id_cast<Dims>(execution_range.get_offset()) / local_range));
                        } else {
                                static_assert(detail::constexpr_false<KernelFlavor>);
                        }
                };
        }

        template <int Dims, bool Collective, typename Kernel>
        detail::host_task_launcher make_host_task_launcher(const range<3>& global_range, const detail::collective_group_id cgid, Kernel&& kernel) {
                static_assert(Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const partition<Dims>>,
                    "Kernel for host task must be invocable with either no arguments or a celerity::partition<Dims>");
                static_assert(!Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const experimental::collective_partition>,
                    "Kernel for collective host task must be invocable with either no arguments or a celerity::experimental::collective_partition");
                static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration
                static_assert(Dims >= 0);

                // Check whether all accessors are being captured by value etc.
                // Although the diagnostics should always be available, we currently disable them for some test cases.
                if(detail::cgf_diagnostics::is_available()) {
                        detail::cgf_diagnostics::get_instance().check<target::host_task>(kernel, m_access_map, m_non_void_side_effects_count);
                }

                return [kernel, global_range](const detail::box<3>& execution_range, const detail::communicator* collective_comm) {
                        (void)global_range;
                        (void)collective_comm;
                        if constexpr(Dims > 0) {
                                if constexpr(Collective) {
                                        static_assert(Dims == 1);
                                        assert(collective_comm != nullptr);
                                        const auto part =
                                            detail::make_collective_partition(detail::range_cast<1>(global_range), detail::box_cast<1>(execution_range), *collective_comm);
                                        kernel(part);
                                } else {
                                        const auto part = detail::make_partition<Dims>(detail::range_cast<Dims>(global_range), detail::box_cast<Dims>(execution_range));
                                        kernel(part);
                                }
                        } else if constexpr(std::is_invocable_v<Kernel, const partition<0>&>) {
                                (void)execution_range;
                                const auto part = detail::make_partition<0>(range<0>(), subrange<0>());
                                kernel(part);
                        } else {
                                (void)execution_range;
                                kernel();
                        }
                };
        }

        std::unique_ptr<detail::task> into_task() && {
                assert(m_task != nullptr);
                for(auto& h : m_hints) {
                        m_task->add_hint(std::move(h));
                }
                return std::move(m_task);
        }
};

namespace detail {

        inline handler make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes) { return handler(tid, num_collective_nodes); }

        inline std::unique_ptr<detail::task> into_task(handler&& cgh) { return std::move(cgh).into_task(); }

        [[nodiscard]] inline hydration_id add_requirement(handler& cgh, const buffer_id bid, std::unique_ptr<range_mapper_base> rm) {
                return cgh.add_requirement(bid, std::move(rm));
        }

        inline void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
                return cgh.add_requirement(hoid, order, is_void);
        }

        inline void add_reduction(handler& cgh, const detail::reduction_info& rinfo) { return cgh.add_reduction(rinfo); }

        inline void set_task_name(handler& cgh, const std::string& debug_name) { cgh.m_usr_def_task_name = {debug_name}; }

        // TODO: The _impl functions in detail only exist during the grace period for deprecated reductions on const buffers; move outside again afterwards.
        template <typename DataT, int Dims, typename BinaryOperation>
        auto reduction_impl(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
                static_assert(sycl::has_known_identity_v<BinaryOperation, DataT>,
                    "Celerity does not currently support reductions without an identity. Either specialize "
                    "sycl::known_identity or use the reduction() overload taking an identity at runtime");
                return detail::make_reduction<false>(vars, cgh, combiner, sycl::known_identity_v<BinaryOperation, DataT>, prop_list);
        }

        template <typename DataT, int Dims, typename BinaryOperation>
        auto reduction_impl(
            const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
                static_assert(!sycl::has_known_identity_v<BinaryOperation, DataT>, "Identity is known to SYCL, remove the identity parameter from reduction()");
                return detail::make_reduction<true>(vars, cgh, combiner, identity, prop_list);
        }

} // namespace detail

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}
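
// Usage sketch for the reduction() overloads above (illustrative; `sum_buf` is a hypothetical
// unit-sized buffer created outside the command group, and the kernel body is a placeholder):
//
//     celerity::buffer<int, 1> sum_buf{celerity::range<1>(1)};
//     ...
//     cgh.parallel_for(celerity::range<1>(4096),
//         celerity::reduction(sum_buf, cgh, sycl::plus<int>(),
//             sycl::property_list{celerity::property::reduction::initialize_to_identity{}}),
//         [=](celerity::item<1> item, auto& sum) { sum += 1; });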

} // namespace celerity

namespace celerity::experimental {
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint) {
        cgh.experimental_constrain_split(constraint);
}

template <typename Hint>
void hint(handler& cgh, Hint&& hint) {
        cgh.experimental_hint(std::forward<Hint>(hint));
}
} // namespace celerity::experimental