celerity / celerity-runtime, build 11253295570 (github)
09 Oct 2024 10:34AM UTC. Coverage: 95.362% (+0.3%) from 95.051%

Pull Request #289 (psalz): Refactor command graph generation, bring testing infrastructure up to speed with IDAG
Commit: Update benchmark results for command_graph_generator refactor

2957 of 3332 branches covered (88.75%). Branch coverage is included in the aggregate percentage.
365 of 367 new or added lines in 5 files covered (99.46%).
12 existing lines in 6 files now uncovered.
6625 of 6716 relevant lines covered (98.65%).
1489224.13 hits per line.

Source file: /include/handler.h (97.02% covered)
#pragma once

#include <memory>
#include <type_traits>
#include <typeinfo>
#include <utility>

#include <fmt/format.h>
#include <sycl/sycl.hpp>

#include "buffer.h"
#include "cgf_diagnostics.h"
#include "item.h"
#include "partition.h"
#include "range_mapper.h"
#include "ranges.h"
#include "task.h"
#include "types.h"
#include "version.h"
#include "workaround.h"

namespace celerity {
class handler;
}

namespace celerity::experimental {

/**
 * Constrains the granularity at which a task's global range can be split into chunks.
 *
 * In some situations an output buffer access is only guaranteed to write to non-overlapping subranges
 * if the task is split in a certain way. For example when computing the row-wise sum of a 2D matrix into
 * a 1D vector, a split constraint is required to ensure that each element of the vector is written by
 * exactly one chunk.
 *
 * Another use case is for performance optimization, for example when the creation of lots of small chunks
 * would result in hardware under-utilization and excessive data transfers.
 *
 * Since ND-range parallel_for kernels are already constrained to be split with group size granularity,
 * adding an additional constraint on top results in an effective constraint of LCM(group size, constraint).
 *
 * The constraint (or effective constraint) must evenly divide the global range.
 * This function has no effect when called for a task without a user-provided global range.
 */
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint);

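// Usage sketch (illustrative only, not part of this header): for the row-wise matrix sum described
// above, constraining the split to whole rows guarantees that every element of the 1D result is
// written by exactly one chunk. The queue variable, ranges, and the omitted accessor setup are
// assumptions for the sake of the example.
//
//   queue.submit([&](celerity::handler& cgh) {
//       // ... accessors for the 2D input and the 1D output would be set up here ...
//       celerity::experimental::constrain_split(cgh, celerity::range<2>{1, 1024});
//       cgh.parallel_for(celerity::range<2>{256, 1024}, [=](celerity::item<2> item) {
//           // ... accumulate into the row-wise sum ...
//       });
//   });
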
} // namespace celerity::experimental

namespace celerity {

namespace detail {
    class task_manager;

    handler make_command_group_handler(const task_id tid, const size_t num_collective_nodes);
    std::unique_ptr<task> into_task(handler&& cgh);
    hydration_id add_requirement(handler& cgh, const buffer_id bid, std::unique_ptr<range_mapper_base> rm);
    void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
    void add_reduction(handler& cgh, const reduction_info& rinfo);

    void set_task_name(handler& cgh, const std::string& debug_name);

    struct unnamed_kernel {};

    template <typename KernelName>
    constexpr bool is_unnamed_kernel = std::is_same_v<KernelName, unnamed_kernel>;

    template <typename KernelName>
    std::string kernel_debug_name() {
        return !is_unnamed_kernel<KernelName> ? utils::get_simplified_type_name<KernelName>() : std::string{};
    }

    struct simple_kernel_flavor {};
    struct nd_range_kernel_flavor {};

    template <typename Flavor, int Dims>
    struct kernel_flavor_traits;

    struct no_local_size {};

    template <int Dims>
    struct kernel_flavor_traits<simple_kernel_flavor, Dims> {
        inline static constexpr bool has_local_size = false;
        using local_size_type = no_local_size;
    };

    template <int Dims>
    struct kernel_flavor_traits<nd_range_kernel_flavor, Dims> {
        inline static constexpr bool has_local_size = true;
        using local_size_type = range<Dims>;
    };
} // namespace detail

/**
 * Tag type marking a `handler::host_task` as a master-node task. Do not construct this type directly, but use `celerity::on_master_node`.
 */
class on_master_node_tag {};

/**
 * Pass to `handler::host_task` to select the master-node task overload.
 */
inline constexpr on_master_node_tag on_master_node;

namespace experimental {
    class collective_tag_factory;

    /**
     * Each collective host task is executed within a collective group. If multiple host tasks are scheduled within the same collective group, they are
     * guaranteed to execute in the same order on every node and within a single thread per node. Each group has its own MPI communicator spanning all
     * participating nodes, so MPI operations the user invokes from different collective groups do not race.
     */
    class collective_group {
      public:
        /// Creates a new collective group with a globally unique id. This must only be called from the main thread.
        collective_group() noexcept : m_cgid(s_next_cgid++) {}

      private:
        friend class collective_tag_factory;
        detail::collective_group_id m_cgid;
        inline static detail::collective_group_id s_next_cgid = detail::root_collective_group_id + 1;
    };

    /**
     * Tag type marking a `handler::host_task` as a collective task. Do not construct this type directly, but use `celerity::experimental::collective`
     * or `celerity::experimental::collective(group)`.
     */
    class collective_tag {
      private:
        friend class collective_tag_factory;
        friend class celerity::handler;
        collective_tag(detail::collective_group_id cgid) : m_cgid(cgid) {}
        detail::collective_group_id m_cgid;
    };

    /**
     * The collective group used in collective host tasks when no group is specified explicitly.
     */
    inline const collective_group default_collective_group;

    /**
     * Tag type construction helper. Do not construct this type directly, use `celerity::experimental::collective` instead.
     */
    class collective_tag_factory {
      public:
        operator experimental::collective_tag() const { return default_collective_group.m_cgid; }
        experimental::collective_tag operator()(experimental::collective_group cg) const { return cg.m_cgid; }
    };

    /**
     * Pass to `handler::host_task` to select the collective host task overload.
     *
     * Either as a value to schedule with the `default_collective_group`:
     * ```c++
     * cgh.host_task(celerity::experimental::collective, []...);
     * ```
     *
     * Or by specifying a collective group explicitly:
     * ```c++
     * celerity::experimental::collective_group my_group;
     * ...
     * cgh.host_task(celerity::experimental::collective(my_group), []...);
     * ```
     */
    inline constexpr collective_tag_factory collective;
} // namespace experimental

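// Ordering sketch (illustrative only, not part of this header): two host tasks submitted to the same
// collective group run in the same order on every node, on a single thread per node, as documented for
// collective_group above. The queue variable and the separate submissions are assumptions.
//
//   celerity::experimental::collective_group io_group;
//   queue.submit([&](celerity::handler& cgh) {
//       cgh.host_task(celerity::experimental::collective(io_group),
//           [](const celerity::experimental::collective_partition& part) { /* step 1: runs first on every node */ });
//   });
//   queue.submit([&](celerity::handler& cgh) {
//       cgh.host_task(celerity::experimental::collective(io_group),
//           [](const celerity::experimental::collective_partition& part) { /* step 2: runs after step 1, same thread */ });
//   });
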
namespace detail {
    template <typename Kernel, int Dims, typename... Reducers>
    inline void invoke_kernel(const Kernel& kernel, const sycl::id<std::max(1, Dims)>& s_id, const range<Dims>& global_range, const id<Dims>& global_offset,
        const id<Dims>& chunk_offset, Reducers&... reducers) {
        kernel(make_item<Dims>(id_cast<Dims>(id<std::max(1, Dims)>(s_id)) + chunk_offset, global_offset, global_range), reducers...);
    }

    template <typename Kernel, int Dims, typename... Reducers>
    inline void invoke_kernel(const Kernel& kernel, const sycl::nd_item<std::max(1, Dims)>& s_item, const range<Dims>& global_range,
        const id<Dims>& global_offset, const id<Dims>& chunk_offset, const range<Dims>& group_range, const id<Dims>& group_offset, Reducers&... reducers) {
        kernel(make_nd_item<Dims>(s_item, global_range, global_offset, chunk_offset, group_range, group_offset), reducers...);
    }

    template <typename Kernel, int Dims>
    auto bind_simple_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims>& chunk_offset) {
        return [=](auto s_item_or_id, auto&... reducers) {
            static_assert(std::is_invocable_v<Kernel, celerity::item<Dims>, decltype(reducers)...>,
                "Kernel function must be invocable with celerity::item<Dims> and as many reducer objects as reductions passed to parallel_for");
            if constexpr(CELERITY_WORKAROUND(DPCPP) && std::is_same_v<sycl::id<Dims>, decltype(s_item_or_id)>) {
                // CELERITY_WORKAROUND_LESS_OR_EQUAL: DPC++ passes a sycl::id instead of a sycl::item to kernels alongside reductions
                invoke_kernel(kernel, s_item_or_id, global_range, global_offset, chunk_offset, reducers...);
            } else {
                invoke_kernel(kernel, s_item_or_id.get_id(), global_range, global_offset, chunk_offset, reducers...);
            }
        };
    }

    template <typename Kernel, int Dims>
    auto bind_nd_range_kernel(const Kernel& kernel, const range<Dims>& global_range, const id<Dims>& global_offset, const id<Dims> chunk_offset,
        const range<Dims>& group_range, const id<Dims>& group_offset) {
        return [=](sycl::nd_item<std::max(1, Dims)> s_item, auto&... reducers) {
            static_assert(std::is_invocable_v<Kernel, celerity::nd_item<Dims>, decltype(reducers)...>,
                "Kernel function must be invocable with celerity::nd_item<Dims> and as many reducer objects as reductions passed to parallel_for");
            invoke_kernel(kernel, s_item, global_range, global_offset, chunk_offset, group_range, group_offset, reducers...);
        };
    }

    template <typename KernelName, typename... Params>
    inline void invoke_sycl_parallel_for(sycl::handler& cgh, Params&&... args) {
        static_assert(CELERITY_FEATURE_UNNAMED_KERNELS || !is_unnamed_kernel<KernelName>,
            "Your SYCL implementation does not support unnamed kernels, add a kernel name template parameter to this parallel_for invocation");
        if constexpr(detail::is_unnamed_kernel<KernelName>) {
#if CELERITY_FEATURE_UNNAMED_KERNELS // see static_assert above
            cgh.parallel_for(std::forward<Params>(args)...);
#endif
        } else {
            cgh.parallel_for<KernelName>(std::forward<Params>(args)...);
        }
    }

    template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
    class reduction_descriptor;

    template <typename DataT, int Dims, typename BinaryOperation, bool WithExplicitIdentity>
    auto make_sycl_reduction(const reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>& d, void* ptr) {
        if constexpr(WithExplicitIdentity) {
            return sycl::reduction(static_cast<DataT*>(ptr), d.m_identity, d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
        } else {
            return sycl::reduction(static_cast<DataT*>(ptr), d.m_op, sycl::property_list{sycl::property::reduction::initialize_to_identity{}});
        }
    }

    template <typename DataT, int Dims, typename BinaryOperation>
    class reduction_descriptor<DataT, Dims, BinaryOperation, false /* WithExplicitIdentity */> {
      public:
        reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT /* identity */, bool include_current_buffer_value)
            : m_bid(bid), m_op(combiner), m_include_current_buffer_value(include_current_buffer_value) {}

      private:
        friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, false>(const reduction_descriptor&, void*);

        buffer_id m_bid;
        BinaryOperation m_op;
        bool m_include_current_buffer_value;
    };

    template <typename DataT, int Dims, typename BinaryOperation>
    class reduction_descriptor<DataT, Dims, BinaryOperation, true /* WithExplicitIdentity */> {
      public:
        reduction_descriptor(buffer_id bid, BinaryOperation combiner, DataT identity, bool include_current_buffer_value)
            : m_bid(bid), m_op(combiner), m_identity(identity), m_include_current_buffer_value(include_current_buffer_value) {}

      private:
        friend auto make_sycl_reduction<DataT, Dims, BinaryOperation, true>(const reduction_descriptor&, void*);

        buffer_id m_bid;
        BinaryOperation m_op;
        DataT m_identity{};
        bool m_include_current_buffer_value;
    };

    template <bool WithExplicitIdentity, typename DataT, int Dims, typename BinaryOperation>
    auto make_reduction(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation op, DataT identity, const sycl::property_list& prop_list) {
        if(vars.get_range().size() != 1) {
            // Like SYCL 2020, Celerity only supports reductions to unit-sized buffers. This allows us to avoid tracking different parts of the buffer
            // as distributed_state and pending_reduction_state.
            throw std::runtime_error("Only unit-sized buffers can be reduction targets");
        }

        const auto bid = detail::get_buffer_id(vars);
        const auto include_current_buffer_value = !prop_list.has_property<celerity::property::reduction::initialize_to_identity>();

        const auto rid = detail::runtime::get_instance().create_reduction(detail::make_reducer(op, identity));
        add_reduction(cgh, reduction_info{rid, bid, include_current_buffer_value});

        return detail::reduction_descriptor<DataT, Dims, BinaryOperation, WithExplicitIdentity>{bid, op, identity, include_current_buffer_value};
    }

} // namespace detail

class handler {
  public:
    template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
    void parallel_for(range<Dims> global_range, ReductionsAndKernel&&... reductions_and_kernel) {
        static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
        parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, id<Dims>(),
            detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
            std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
    }

    template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
    void parallel_for(range<Dims> global_range, id<Dims> global_offset, ReductionsAndKernel&&... reductions_and_kernel) {
        static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
        parallel_for_reductions_and_kernel<detail::simple_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(global_range, global_offset,
            detail::no_local_size{}, std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
            std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
    }

    template <typename KernelName = detail::unnamed_kernel, int Dims, typename... ReductionsAndKernel>
    void parallel_for(celerity::nd_range<Dims> execution_range, ReductionsAndKernel&&... reductions_and_kernel) {
        static_assert(sizeof...(reductions_and_kernel) > 0, "No kernel given");
        parallel_for_reductions_and_kernel<detail::nd_range_kernel_flavor, KernelName, Dims, ReductionsAndKernel...>(execution_range.get_global_range(),
            execution_range.get_offset(), execution_range.get_local_range(), std::make_index_sequence<sizeof...(reductions_and_kernel) - 1>{},
            std::forward<ReductionsAndKernel>(reductions_and_kernel)...);
    }

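    // Usage sketch (illustrative only, not part of this header): the trailing parameter pack accepts any
    // reduction objects first and the kernel functor last. The queue variable, kernel names and ranges
    // below are assumptions for the sake of the example.
    //
    //   queue.submit([&](celerity::handler& cgh) {
    //       cgh.parallel_for<class my_kernel>(celerity::range<1>{1024}, [=](celerity::item<1> item) { /* ... */ });
    //   });
    //
    //   queue.submit([&](celerity::handler& cgh) {
    //       cgh.parallel_for<class my_nd_kernel>(celerity::nd_range<1>{{1024}, {64}}, [=](celerity::nd_item<1> item) { /* ... */ });
    //   });
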
    /**
     * Schedules `kernel` to execute on the master node only. Call via `cgh.host_task(celerity::on_master_node, []...)`. The kernel is assumed to be invocable
     * with the signature `void(const celerity::partition<0>&)` or `void()`.
     *
     * The kernel is executed in a background thread pool and multiple master node tasks may be executed concurrently if they are independent in the
     * task graph, so proper synchronization must be ensured.
     *
     * **Compatibility note:** This replaces master-access tasks from Celerity 0.1 which were executed on the master node's main thread, so this implementation
     * may require different lifetimes for captures. See `celerity::allow_by_ref` for more information on this topic.
     */
    template <typename Functor>
    void host_task(on_master_node_tag /* tag */, Functor&& kernel) {
        auto launcher = make_host_task_launcher<0, false>(detail::zeros, 0, std::forward<Functor>(kernel));
        create_master_node_task(std::move(launcher));
    }

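    // Usage sketch (illustrative only, not part of this header): a master-node host task. The queue
    // variable is an assumption.
    //
    //   queue.submit([&](celerity::handler& cgh) {
    //       cgh.host_task(celerity::on_master_node, [] {
    //           // Runs once, on the master node only, inside the background thread pool.
    //       });
    //   });
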
    /**
     * Schedules `kernel` to be executed collectively on all nodes participating in the specified collective group. Call via
     * `cgh.host_task(celerity::experimental::collective, []...)` or `cgh.host_task(celerity::experimental::collective(group), []...)`.
     * The kernel is assumed to be invocable with the signature `void(const celerity::experimental::collective_partition&)`
     * or `void(const celerity::partition<1>&)`.
     *
     * This provides a framework for using arbitrary collective MPI operations in a host task, such as performing collective I/O with parallel HDF5.
     * The local node id, the number of participating nodes, as well as the group MPI communicator can be obtained from the `collective_partition` passed into
     * the kernel.
     *
     * All collective tasks within a collective group are guaranteed to be executed in the same order on all nodes. Additionally, all internal MPI operations
     * and all host kernel invocations are executed in a single thread on each host.
     */
    template <typename Functor>
    void host_task(experimental::collective_tag tag, Functor&& kernel) {
        // FIXME: We should not have to know how the global range is determined for collective tasks to create the launcher
        auto launcher = make_host_task_launcher<1, true>(range<3>{m_num_collective_nodes, 1, 1}, tag.m_cgid, std::forward<Functor>(kernel));
        create_collective_task(tag.m_cgid, std::move(launcher));
    }

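    // Usage sketch (illustrative only, not part of this header): a collective host task issuing an MPI
    // collective operation. The queue variable is an assumption, and the communicator getter name on
    // collective_partition is hypothetical and shown only to indicate where the group communicator
    // would come from.
    //
    //   queue.submit([&](celerity::handler& cgh) {
    //       cgh.host_task(celerity::experimental::collective, [](const celerity::experimental::collective_partition& part) {
    //           // e.g. MPI_Barrier(part.get_communicator()); // getter name assumed for illustration
    //       });
    //   });
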
    /**
     * Schedules a distributed execution of `kernel` by splitting the iteration space in a runtime-defined manner. The kernel is assumed to be invocable
     * with the signature `void(const celerity::partition<Dims>&)`.
     *
     * The kernel is executed in a background thread pool with multiple host tasks being run concurrently if they are independent in the task graph,
     * so proper synchronization must be ensured. The partition passed into the kernel describes the split each host receives. It may be used with accessors
     * to obtain the per-node portion of a buffer en bloc, see `celerity::accessor::get_allocation_window` for details.
     *
     * There are no guarantees with respect to the split size and the order in which host tasks are executed between nodes other than
     * the restrictions imposed by dependencies in the task graph. Also, the kernel may be invoked multiple times on one node and not be scheduled on
     * another node. If you need guarantees about execution order, consider using a collective host task instead.
     */
    template <int Dims, typename Functor>
    void host_task(range<Dims> global_range, id<Dims> global_offset, Functor&& kernel) {
        const detail::task_geometry geometry{
            Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset), get_constrained_granularity(global_range, range<Dims>(detail::ones))};
        auto launcher = make_host_task_launcher<Dims, false>(detail::range_cast<3>(global_range), 0, std::forward<Functor>(kernel));
        create_host_compute_task(geometry, std::move(launcher));
    }

    /**
     * Like `host_task(range<Dims> global_range, id<Dims> global_offset, Functor kernel)`, but with a `global_offset` of zero.
     */
    template <int Dims, typename Functor>
    void host_task(range<Dims> global_range, Functor&& kernel) {
        host_task(global_range, {}, std::forward<Functor>(kernel));
    }

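    // Usage sketch (illustrative only, not part of this header): a distributed host task over a 1D range.
    // The queue variable is an assumption, and the partition member mentioned in the comment is an
    // illustrative name rather than a documented accessor.
    //
    //   queue.submit([&](celerity::handler& cgh) {
    //       cgh.host_task(celerity::range<1>{256}, [=](const celerity::partition<1>& part) {
    //           // part describes the portion of {256} assigned to this invocation (e.g. via part.get_subrange()).
    //       });
    //   });
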
  private:
    friend handler detail::make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes);
    friend std::unique_ptr<detail::task> detail::into_task(handler&& cgh);
    friend detail::hydration_id detail::add_requirement(handler& cgh, const detail::buffer_id bid, std::unique_ptr<detail::range_mapper_base> rm);
    friend void detail::add_requirement(handler& cgh, const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void);
    friend void detail::add_reduction(handler& cgh, const detail::reduction_info& rinfo);
    template <int Dims>
    friend void experimental::constrain_split(handler& cgh, const range<Dims>& constraint);
    template <typename Hint>
    friend void experimental::hint(handler& cgh, Hint&& hint);
    friend void detail::set_task_name(handler& cgh, const std::string& debug_name);

    detail::task_id m_tid;
    detail::buffer_access_map m_access_map;
    detail::side_effect_map m_side_effects;
    size_t m_non_void_side_effects_count = 0;
    detail::reduction_set m_reductions;
    std::unique_ptr<detail::task> m_task = nullptr;
    size_t m_num_collective_nodes;
    detail::hydration_id m_next_accessor_hydration_id = 1;
    std::optional<std::string> m_usr_def_task_name;
    range<3> m_split_constraint = detail::ones;
    std::vector<std::unique_ptr<detail::hint_base>> m_hints;

    handler(detail::task_id tid, size_t num_collective_nodes) : m_tid(tid), m_num_collective_nodes(num_collective_nodes) {}

    template <typename KernelFlavor, typename KernelName, int Dims, typename... ReductionsAndKernel, size_t... ReductionIndices>
    void parallel_for_reductions_and_kernel(range<Dims> global_range, id<Dims> global_offset,
        typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_size, std::index_sequence<ReductionIndices...> indices,
        ReductionsAndKernel&&... kernel_and_reductions) {
        auto args_tuple = std::forward_as_tuple(kernel_and_reductions...);
        auto&& kernel = std::get<sizeof...(kernel_and_reductions) - 1>(args_tuple);
        parallel_for_kernel_and_reductions<KernelFlavor, KernelName>(
            global_range, global_offset, local_size, std::forward<decltype(kernel)>(kernel), std::get<ReductionIndices>(args_tuple)...);
    }

    template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, typename... Reductions>
    void parallel_for_kernel_and_reductions(range<Dims> global_range, id<Dims> global_offset,
        typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel, Reductions&... reductions) {
        range<3> granularity = {1, 1, 1};
        if constexpr(detail::kernel_flavor_traits<KernelFlavor, Dims>::has_local_size) {
            for(int d = 0; d < Dims; ++d) {
                granularity[d] = local_range[d];
            }
        }
        const detail::task_geometry geometry{Dims, detail::range_cast<3>(global_range), detail::id_cast<3>(global_offset),
            get_constrained_granularity(global_range, detail::range_cast<Dims>(granularity))};
        auto launcher = make_device_kernel_launcher<KernelFlavor, KernelName, Dims>(
            global_range, global_offset, local_range, std::forward<Kernel>(kernel), std::index_sequence_for<Reductions...>(), reductions...);
        create_device_compute_task(geometry, detail::kernel_debug_name<KernelName>(), std::move(launcher));
    }

    [[nodiscard]] detail::hydration_id add_requirement(const detail::buffer_id bid, std::unique_ptr<detail::range_mapper_base> rm) {
        assert(m_task == nullptr);
        m_access_map.add_access(bid, std::move(rm));
        return m_next_accessor_hydration_id++;
    }

    void add_requirement(const detail::host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
        assert(m_task == nullptr);
        m_side_effects.add_side_effect(hoid, order);
        if(!is_void) { m_non_void_side_effects_count++; }
    }

    void add_reduction(const detail::reduction_info& rinfo) {
        assert(m_task == nullptr);
        m_reductions.push_back(rinfo);
    }

    template <int Dims>
    void experimental_constrain_split(const range<Dims>& constraint) {
        assert(m_task == nullptr);
        m_split_constraint = detail::range_cast<3>(constraint);
    }

    template <typename Hint>
    void experimental_hint(Hint&& hint) {
        static_assert(std::is_base_of_v<detail::hint_base, std::decay_t<Hint>>, "Hint must extend hint_base");
        static_assert(std::is_move_constructible_v<Hint>, "Hint must be move-constructible");
        for(auto& h : m_hints) {
            // We currently don't allow more than one hint of the same type for simplicity; this could be loosened in the future.
            auto& hr = *h; // Need to do this here to avoid -Wpotentially-evaluated-expression
            if(typeid(hr) == typeid(hint)) { throw std::runtime_error("Providing more than one hint of the same type is not allowed"); }
            h->validate(hint);
        }
        m_hints.emplace_back(std::make_unique<std::decay_t<Hint>>(std::forward<Hint>(hint)));
    }

    template <int Dims>
    range<3> get_constrained_granularity(const range<Dims>& global_size, const range<Dims>& granularity) const {
        range<3> result = detail::range_cast<3>(granularity);
        for(int i = 0; i < Dims; ++i) {
            const auto lcm = std::lcm(granularity[i], m_split_constraint[i]);
            if(lcm == 0) { throw std::runtime_error("Split constraint cannot be 0"); }
            result[i] = lcm;
        }
        if(global_size % detail::range_cast<Dims>(result) != range<Dims>(detail::zeros)) {
            throw std::runtime_error(fmt::format("The{}split constraint {} does not evenly divide the kernel global size {}",
                granularity.size() > 1 ? " effective " : " ", detail::range_cast<Dims>(result), global_size));
        }
        return result;
    }

    void create_host_compute_task(const detail::task_geometry& geometry, detail::host_task_launcher launcher) {
        assert(m_task == nullptr);
        if(geometry.global_size.size() == 0) {
            // TODO this can be easily supported by not creating a task in case the execution range is empty
            throw std::runtime_error{"The execution range of distributed host tasks must have at least one item"};
        }
        m_task =
            detail::task::make_host_compute(m_tid, geometry, std::move(launcher), std::move(m_access_map), std::move(m_side_effects), std::move(m_reductions));

        m_task->set_debug_name(m_usr_def_task_name.value_or(""));
    }

    void create_device_compute_task(const detail::task_geometry& geometry, const std::string& debug_name, detail::device_kernel_launcher launcher) {
        assert(m_task == nullptr);
        if(geometry.global_size.size() == 0) {
            // TODO unless reductions are involved, this can be easily supported by not creating a task in case the execution range is empty.
            // Edge case: If the task includes reductions that specify property::reduction::initialize_to_identity, we need to create a task that sets
            // the buffer state to an empty pending_reduction_state in the graph_generator. This will cause a trivial reduction_command to be generated on
            // each node that reads from the reduction output buffer, initializing it to the identity value locally.
            throw std::runtime_error{"The execution range of device tasks must have at least one item"};
        }
        // Note that cgf_diagnostics has a similar check, but we don't catch void side effects there.
        if(!m_side_effects.empty()) { throw std::runtime_error{"Side effects cannot be used in device kernels"}; }
        m_task = detail::task::make_device_compute(m_tid, geometry, std::move(launcher), std::move(m_access_map), std::move(m_reductions));

        m_task->set_debug_name(m_usr_def_task_name.value_or(debug_name));
    }

    void create_collective_task(const detail::collective_group_id cgid, detail::host_task_launcher launcher) {
        assert(m_task == nullptr);
        m_task = detail::task::make_collective(m_tid, cgid, m_num_collective_nodes, std::move(launcher), std::move(m_access_map), std::move(m_side_effects));

        m_task->set_debug_name(m_usr_def_task_name.value_or(""));
    }

    void create_master_node_task(detail::host_task_launcher launcher) {
        assert(m_task == nullptr);
        m_task = detail::task::make_master_node(m_tid, std::move(launcher), std::move(m_access_map), std::move(m_side_effects));

        m_task->set_debug_name(m_usr_def_task_name.value_or(""));
    }

    template <typename KernelFlavor, typename KernelName, int Dims, typename Kernel, size_t... ReductionIndices, typename... Reductions>
    detail::device_kernel_launcher make_device_kernel_launcher(const range<Dims>& global_range, const id<Dims>& global_offset,
        typename detail::kernel_flavor_traits<KernelFlavor, Dims>::local_size_type local_range, Kernel&& kernel,
        std::index_sequence<ReductionIndices...> /* indices */, Reductions... reductions) {
        static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration

        // Check whether all accessors are being captured by value etc.
        // Although the diagnostics should always be available, we currently disable them for some test cases.
        if(detail::cgf_diagnostics::is_available()) { detail::cgf_diagnostics::get_instance().check<target::device>(kernel, m_access_map); }

        return [=](sycl::handler& sycl_cgh, const detail::box<3>& execution_range, const std::vector<void*>& reduction_ptrs) {
            constexpr int sycl_dims = std::max(1, Dims);
            if constexpr(std::is_same_v<KernelFlavor, detail::simple_kernel_flavor>) {
                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl_global_range,
                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                    detail::bind_simple_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset())));
            } else if constexpr(std::is_same_v<KernelFlavor, detail::nd_range_kernel_flavor>) {
                const auto sycl_global_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(execution_range.get_range()));
                const auto sycl_local_range = sycl::range<sycl_dims>(detail::range_cast<sycl_dims>(local_range));
                detail::invoke_sycl_parallel_for<KernelName>(sycl_cgh, sycl::nd_range{sycl_global_range, sycl_local_range},
                    detail::make_sycl_reduction(reductions, reduction_ptrs[ReductionIndices])...,
                    detail::bind_nd_range_kernel(kernel, global_range, global_offset, detail::id_cast<Dims>(execution_range.get_offset()),
                        global_range / local_range, detail::id_cast<Dims>(execution_range.get_offset()) / local_range));
            } else {
                static_assert(detail::constexpr_false<KernelFlavor>);
            }
        };
    }

    template <int Dims, bool Collective, typename Kernel>
    detail::host_task_launcher make_host_task_launcher(const range<3>& global_range, const detail::collective_group_id cgid, Kernel&& kernel) {
        static_assert(Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const partition<Dims>>,
            "Kernel for host task must be invocable with either no arguments or a celerity::partition<Dims>");
        static_assert(!Collective || std::is_invocable_v<Kernel> || std::is_invocable_v<Kernel, const experimental::collective_partition>,
            "Kernel for collective host task must be invocable with either no arguments or a celerity::experimental::collective_partition");
        static_assert(std::is_copy_constructible_v<std::decay_t<Kernel>>, "Kernel functor must be copyable"); // Required for hydration
        static_assert(Dims >= 0);

        // Check whether all accessors are being captured by value etc.
        // Although the diagnostics should always be available, we currently disable them for some test cases.
        if(detail::cgf_diagnostics::is_available()) {
            detail::cgf_diagnostics::get_instance().check<target::host_task>(kernel, m_access_map, m_non_void_side_effects_count);
        }

        return [kernel, global_range](const detail::box<3>& execution_range, const detail::communicator* collective_comm) {
            (void)global_range;
            (void)collective_comm;
            if constexpr(Dims > 0) {
                if constexpr(Collective) {
                    static_assert(Dims == 1);
                    assert(collective_comm != nullptr);
                    const auto part =
                        detail::make_collective_partition(detail::range_cast<1>(global_range), detail::box_cast<1>(execution_range), *collective_comm);
                    kernel(part);
                } else {
                    const auto part = detail::make_partition<Dims>(detail::range_cast<Dims>(global_range), detail::box_cast<Dims>(execution_range));
                    kernel(part);
                }
            } else if constexpr(std::is_invocable_v<Kernel, const partition<0>&>) {
                (void)execution_range;
                const auto part = detail::make_partition<0>(range<0>(), subrange<0>());
                kernel(part);
            } else {
                (void)execution_range;
                kernel();
            }
        };
    }

    std::unique_ptr<detail::task> into_task() && {
        assert(m_task != nullptr);
        for(auto& h : m_hints) {
            m_task->add_hint(std::move(h));
        }
        return std::move(m_task);
    }
};

namespace detail {

    inline handler make_command_group_handler(const detail::task_id tid, const size_t num_collective_nodes) { return handler(tid, num_collective_nodes); }

    inline std::unique_ptr<detail::task> into_task(handler&& cgh) { return std::move(cgh).into_task(); }

    [[nodiscard]] inline hydration_id add_requirement(handler& cgh, const buffer_id bid, std::unique_ptr<range_mapper_base> rm) {
        return cgh.add_requirement(bid, std::move(rm));
    }

    inline void add_requirement(handler& cgh, const host_object_id hoid, const experimental::side_effect_order order, const bool is_void) {
        return cgh.add_requirement(hoid, order, is_void);
    }

    inline void add_reduction(handler& cgh, const detail::reduction_info& rinfo) { return cgh.add_reduction(rinfo); }

    inline void set_task_name(handler& cgh, const std::string& debug_name) { cgh.m_usr_def_task_name = {debug_name}; }

    // TODO: The _impl functions in detail only exist during the grace period for deprecated reductions on const buffers; move outside again afterwards.
    template <typename DataT, int Dims, typename BinaryOperation>
    auto reduction_impl(const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        static_assert(sycl::has_known_identity_v<BinaryOperation, DataT>,
            "Celerity does not currently support reductions without an identity. Either specialize "
            "sycl::known_identity or use the reduction() overload taking an identity at runtime");
        return detail::make_reduction<false>(vars, cgh, combiner, sycl::known_identity_v<BinaryOperation, DataT>, prop_list);
    }

    template <typename DataT, int Dims, typename BinaryOperation>
    auto reduction_impl(
        const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
        static_assert(!sycl::has_known_identity_v<BinaryOperation, DataT>, "Identity is known to SYCL, remove the identity parameter from reduction()");
        return detail::make_reduction<true>(vars, cgh, combiner, identity, prop_list);
    }

} // namespace detail

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
    return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
auto reduction(buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
    return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}

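// Usage sketch (illustrative only, not part of this header): summing a buffer into a unit-sized result
// buffer by passing a reduction object to parallel_for ahead of the kernel. The queue variable and the
// buffer/accessor setup are assumptions for the sake of the example.
//
//   celerity::buffer<float, 1> data{{1024}};
//   celerity::buffer<float, 1> sum{{1}};
//   queue.submit([&](celerity::handler& cgh) {
//       celerity::accessor in{data, cgh, celerity::access::one_to_one{}, celerity::read_only};
//       auto red = celerity::reduction(sum, cgh, sycl::plus<float>{},
//           celerity::property::reduction::initialize_to_identity{});
//       cgh.parallel_for(celerity::range<1>{1024}, red, [=](celerity::item<1> item, auto& s) { s += in[item]; });
//   });
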
template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
    return detail::reduction_impl(vars, cgh, combiner, prop_list);
}

template <typename DataT, int Dims, typename BinaryOperation>
[[deprecated("Creating reduction from const buffer is deprecated, capture buffer by reference instead")]] auto reduction(
    const buffer<DataT, Dims>& vars, handler& cgh, const DataT identity, BinaryOperation combiner, const sycl::property_list& prop_list = {}) {
    return detail::reduction_impl(vars, cgh, identity, combiner, prop_list);
}

} // namespace celerity

namespace celerity::experimental {
template <int Dims>
void constrain_split(handler& cgh, const range<Dims>& constraint) {
    cgh.experimental_constrain_split(constraint);
}

template <typename Hint>
void hint(handler& cgh, Hint&& hint) {
    cgh.experimental_hint(std::forward<Hint>(hint));
}
} // namespace celerity::experimental