celerity / celerity-runtime / build 9945915519

15 Jul 2024 08:07PM UTC coverage: 93.077% (-1.3%) from 94.362%

Triggered by a push via github, committed by fknorr:

Rename existing backend / executor -> legacy_backend / legacy_executor

Names 'backend' and 'executor' will be re-used, but we want to keep the old APIs around in the meantime to keep changesets small.

3188 of 3687 branches covered (86.47%); branch coverage is included in the aggregate percentage.
17 of 23 new or added lines in 6 files covered (73.91%).
95 existing lines in 8 files are now uncovered.
7232 of 7508 relevant lines covered (96.32%).
169246.64 hits per line.

Source File: /include/task.h (80.34% covered)
#pragma once

#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "device_queue.h"
#include "grid.h"
#include "hint.h"
#include "host_queue.h"
#include "intrusive_graph.h"
#include "launcher.h"
#include "lifetime_extending_state.h"
#include "range_mapper.h"
#include "types.h"

namespace celerity {

class handler;

namespace detail {

        class command_launcher_storage_base {
          public:
                command_launcher_storage_base() = default;
                command_launcher_storage_base(const command_launcher_storage_base&) = delete;
                command_launcher_storage_base(command_launcher_storage_base&&) = default;
                command_launcher_storage_base& operator=(const command_launcher_storage_base&) = delete;
                command_launcher_storage_base& operator=(command_launcher_storage_base&&) = default;
                virtual ~command_launcher_storage_base() = default;

                virtual sycl::event operator()(
                    device_queue& q, const subrange<3> execution_sr, const std::vector<void*>& reduction_ptrs, const bool is_reduction_initializer) const = 0;
                virtual std::future<host_queue::execution_info> operator()(host_queue& q, const subrange<3>& execution_sr) const = 0;
        };

        template <typename Functor>
        class command_launcher_storage : public command_launcher_storage_base {
          public:
                command_launcher_storage(Functor&& fun) : m_fun(std::move(fun)) {}

                sycl::event operator()(
                    device_queue& q, const subrange<3> execution_sr, const std::vector<void*>& reduction_ptrs, const bool is_reduction_initializer) const override {
                        return invoke<sycl::event>(q, execution_sr, reduction_ptrs, is_reduction_initializer);
                }

                std::future<host_queue::execution_info> operator()(host_queue& q, const subrange<3>& execution_sr) const override {
                        return invoke<std::future<host_queue::execution_info>>(q, execution_sr);
                }

          private:
                Functor m_fun;

                template <typename Ret, typename... Args>
                Ret invoke(Args&&... args) const {
                        if constexpr(std::is_invocable_v<Functor, Args...>) {
                                return m_fun(args...);
                        } else {
                                throw std::runtime_error("Cannot launch command function with provided arguments");
                        }
                }
        };
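
The invoke() helper above dispatches at compile time: if constexpr(std::is_invocable_v<Functor, Args...>) forwards the call to the stored functor only when its signature matches the arguments, and otherwise falls back to a runtime error. A minimal, self-contained sketch of that pattern follows; the wrapper and argument types are stand-ins for illustration, not Celerity API.

#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

// Stand-in argument types; in task.h the real arguments are device_queue/host_queue et al.
struct device_args { int value; };
struct host_args { std::string name; };

template <typename Functor>
class dispatching_wrapper {
  public:
        explicit dispatching_wrapper(Functor fun) : m_fun(std::move(fun)) {}

        // Forwards to the functor only if it is callable with the given arguments,
        // mirroring command_launcher_storage::invoke.
        template <typename Ret, typename... Args>
        Ret invoke(Args&&... args) const {
                if constexpr(std::is_invocable_v<Functor, Args...>) {
                        return m_fun(args...);
                } else {
                        throw std::runtime_error("Cannot launch command function with provided arguments");
                }
        }

  private:
        Functor m_fun;
};

int main() {
        // This functor only accepts device_args, so invoking it with host_args throws at runtime.
        const dispatching_wrapper wrapper{[](const device_args& args) { return args.value * 2; }};
        const int doubled = wrapper.invoke<int>(device_args{21}); // 42
        try {
                wrapper.invoke<int>(host_args{"unsupported"});
        } catch(const std::runtime_error&) { /* expected: signature does not match */ }
        return doubled == 42 ? 0 : 1;
}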

        class buffer_access_map {
          public:
                void add_access(buffer_id bid, std::unique_ptr<range_mapper_base>&& rm) { m_accesses.emplace_back(bid, std::move(rm)); }

                std::unordered_set<buffer_id> get_accessed_buffers() const;
                std::unordered_set<cl::sycl::access::mode> get_access_modes(buffer_id bid) const;
                size_t get_num_accesses() const { return m_accesses.size(); }
                std::pair<buffer_id, access_mode> get_nth_access(const size_t n) const {
                        const auto& [bid, rm] = m_accesses[n];
                        return {bid, rm->get_access_mode()};
                }

                /**
                 * @brief Computes the combined access-region for a given buffer, mode and subrange.
                 *
                 * @param bid
                 * @param mode
                 * @param sr The subrange to be passed to the range mappers (extended to a chunk using the global size of the task)
                 *
                 * @returns The region obtained by merging the results of all range-mappers for this buffer and mode
                 */
                region<3> get_mode_requirements(
                    const buffer_id bid, const access_mode mode, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;

                box<3> get_requirements_for_nth_access(const size_t n, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;

                std::vector<const range_mapper_base*> get_range_mappers(const buffer_id bid) const {
                        std::vector<const range_mapper_base*> rms;
                        for(const auto& [a_bid, a_rm] : m_accesses) {
                                if(a_bid == bid) { rms.push_back(a_rm.get()); }
                        }
                        return rms;
                }

                box_vector<3> get_required_contiguous_boxes(const buffer_id bid, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;

          private:
                std::vector<std::pair<buffer_id, std::unique_ptr<range_mapper_base>>> m_accesses;
        };

        using reduction_set = std::vector<reduction_info>;

        class side_effect_map : private std::unordered_map<host_object_id, experimental::side_effect_order> {
          private:
                using map_base = std::unordered_map<host_object_id, experimental::side_effect_order>;

          public:
                using typename map_base::const_iterator, map_base::value_type, map_base::key_type, map_base::mapped_type, map_base::const_reference,
                    map_base::const_pointer;
                using iterator = const_iterator;
                using reference = const_reference;
                using pointer = const_pointer;

                using map_base::size, map_base::count, map_base::empty, map_base::cbegin, map_base::cend, map_base::at;

                iterator begin() const { return cbegin(); }
                iterator end() const { return cend(); }
                iterator find(host_object_id key) const { return map_base::find(key); }

                void add_side_effect(host_object_id hoid, experimental::side_effect_order order);
        };
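
side_effect_map above uses a common pattern: it privately inherits from std::unordered_map and re-exports only const lookup and iteration, so callers can read the recorded side effects while the only mutating entry point is add_side_effect(). A stripped-down sketch of the same read-only-facade pattern, using plain int keys and values instead of Celerity's ID and ordering types:

#include <cassert>
#include <unordered_map>

// Read-only facade over an unordered_map: one named member function inserts,
// while iteration and lookup are exposed only in const form.
class readonly_int_map : private std::unordered_map<int, int> {
  private:
        using map_base = std::unordered_map<int, int>;

  public:
        using iterator = map_base::const_iterator;

        using map_base::at, map_base::count, map_base::empty, map_base::size;

        iterator begin() const { return cbegin(); }
        iterator end() const { return cend(); }
        iterator find(const int key) const { return map_base::find(key); }

        // The only mutating entry point, analogous to side_effect_map::add_side_effect.
        void add_entry(const int key, const int value) { emplace(key, value); }
};

int main() {
        readonly_int_map m;
        m.add_entry(1, 10);
        m.add_entry(2, 20);
        assert(m.size() == 2);
        assert(m.find(1) != m.end());
        int sum = 0;
        for(const auto& [key, value] : m) { sum += value; } // only const iteration is available
        assert(sum == 30);
        return sum == 30 ? 0 : 1;
}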

        class fence_promise {
          public:
                fence_promise() = default;
                fence_promise(const fence_promise&) = delete;
                fence_promise& operator=(const fence_promise&) = delete;
                virtual ~fence_promise() = default;

                virtual void fulfill() = 0;
                virtual allocation_id get_user_allocation_id() = 0;
        };

        struct task_geometry {
                int dimensions = 0;
                range<3> global_size{1, 1, 1};
                id<3> global_offset{};
                range<3> granularity{1, 1, 1};
        };

        class task : public intrusive_graph_node<task> {
          public:
                task_type get_type() const { return m_type; }

                task_id get_id() const { return m_tid; }

                collective_group_id get_collective_group_id() const { return m_cgid; }

                const buffer_access_map& get_buffer_access_map() const { return m_access_map; }

                const side_effect_map& get_side_effect_map() const { return m_side_effects; }

                const task_geometry& get_geometry() const { return m_geometry; }

                int get_dimensions() const { return m_geometry.dimensions; }

                range<3> get_global_size() const { return m_geometry.global_size; }

                id<3> get_global_offset() const { return m_geometry.global_offset; }

                range<3> get_granularity() const { return m_geometry.granularity; }

                void set_debug_name(const std::string& debug_name) { m_debug_name = debug_name; }
                const std::string& get_debug_name() const { return m_debug_name; }

                bool has_variable_split() const { return m_type == task_type::host_compute || m_type == task_type::device_compute; }

                execution_target get_execution_target() const {
                        switch(m_type) {
                        case task_type::epoch: return execution_target::none;
                        case task_type::device_compute: return execution_target::device;
                        case task_type::host_compute:
                        case task_type::collective:
                        case task_type::master_node: return execution_target::host;
                        case task_type::horizon:
                        case task_type::fence: return execution_target::none;
                        default: assert(!"Unhandled task type"); return execution_target::none;
                        }
                }

                const reduction_set& get_reductions() const { return m_reductions; }

                epoch_action get_epoch_action() const { return m_epoch_action; }

                fence_promise* get_fence_promise() const { return m_fence_promise.get(); }

                template <typename Launcher>
                Launcher get_launcher() const {
                        return {};
                } // placeholder

                template <typename... Args>
                auto launch(Args&&... args) const {
                        return (*m_launcher)(std::forward<Args>(args)...);
                }

                void extend_lifetime(std::shared_ptr<lifetime_extending_state> state) { m_attached_state.emplace_back(std::move(state)); }

                void add_hint(std::unique_ptr<hint_base>&& h) { m_hints.emplace_back(std::move(h)); }

                template <typename Hint>
                const Hint* get_hint() const {
                        static_assert(std::is_base_of_v<hint_base, Hint>, "Hint must extend hint_base");
                        for(auto& h : m_hints) {
                                if(auto* ptr = dynamic_cast<Hint*>(h.get()); ptr != nullptr) { return ptr; }
                        }
                        return nullptr;
                }

                static std::unique_ptr<task> make_epoch(task_id tid, detail::epoch_action action) {
                        return std::unique_ptr<task>(new task(tid, task_type::epoch, non_collective_group_id, task_geometry{}, nullptr, {}, {}, {}, action, nullptr));
                }

                static std::unique_ptr<task> make_host_compute(task_id tid, task_geometry geometry, std::unique_ptr<command_launcher_storage_base> launcher,
                    buffer_access_map access_map, side_effect_map side_effect_map, reduction_set reductions) {
                        return std::unique_ptr<task>(new task(tid, task_type::host_compute, non_collective_group_id, geometry, std::move(launcher), std::move(access_map),
                            std::move(side_effect_map), std::move(reductions), {}, nullptr));
                }

                static std::unique_ptr<task> make_device_compute(task_id tid, task_geometry geometry, std::unique_ptr<command_launcher_storage_base> launcher,
                    buffer_access_map access_map, reduction_set reductions) {
                        return std::unique_ptr<task>(new task(tid, task_type::device_compute, non_collective_group_id, geometry, std::move(launcher), std::move(access_map),
                            {}, std::move(reductions), {}, nullptr));
                }

                static std::unique_ptr<task> make_collective(task_id tid, collective_group_id cgid, size_t num_collective_nodes,
                    std::unique_ptr<command_launcher_storage_base> launcher, buffer_access_map access_map, side_effect_map side_effect_map) {
                        const task_geometry geometry{1, detail::range_cast<3>(range(num_collective_nodes)), {}, {1, 1, 1}};
                        return std::unique_ptr<task>(
                            new task(tid, task_type::collective, cgid, geometry, std::move(launcher), std::move(access_map), std::move(side_effect_map), {}, {}, nullptr));
                }

                static std::unique_ptr<task> make_master_node(
                    task_id tid, std::unique_ptr<command_launcher_storage_base> launcher, buffer_access_map access_map, side_effect_map side_effect_map) {
                        return std::unique_ptr<task>(new task(tid, task_type::master_node, non_collective_group_id, task_geometry{}, std::move(launcher),
                            std::move(access_map), std::move(side_effect_map), {}, {}, nullptr));
                }

                static std::unique_ptr<task> make_horizon(task_id tid) {
                        return std::unique_ptr<task>(new task(tid, task_type::horizon, non_collective_group_id, task_geometry{}, nullptr, {}, {}, {}, {}, nullptr));
                }

                static std::unique_ptr<task> make_fence(
                    task_id tid, buffer_access_map access_map, side_effect_map side_effect_map, std::unique_ptr<fence_promise> fence_promise) {
                        return std::unique_ptr<task>(new task(tid, task_type::fence, non_collective_group_id, task_geometry{}, nullptr, std::move(access_map),
                            std::move(side_effect_map), {}, {}, std::move(fence_promise)));
                }

          private:
                task_id m_tid;
                task_type m_type;
                collective_group_id m_cgid;
                task_geometry m_geometry;
                std::unique_ptr<command_launcher_storage_base> m_launcher;
                buffer_access_map m_access_map;
                detail::side_effect_map m_side_effects;
                reduction_set m_reductions;
                std::string m_debug_name;
                detail::epoch_action m_epoch_action;
                // TODO I believe that `struct task` should not store command_group_launchers, fence_promise or other state that is related to execution instead of
                // abstract DAG building. For user-initialized buffers we already notify the runtime -> executor of this state directly. Maybe also do that for these.
                std::unique_ptr<fence_promise> m_fence_promise;
                std::vector<std::shared_ptr<lifetime_extending_state>> m_attached_state;
                std::vector<std::unique_ptr<hint_base>> m_hints;

                task(task_id tid, task_type type, collective_group_id cgid, task_geometry geometry, std::unique_ptr<command_launcher_storage_base> launcher,
                    buffer_access_map access_map, detail::side_effect_map side_effects, reduction_set reductions, detail::epoch_action epoch_action,
                    std::unique_ptr<fence_promise> fence_promise)
                    : m_tid(tid), m_type(type), m_cgid(cgid), m_geometry(geometry), m_launcher(std::move(launcher)), m_access_map(std::move(access_map)),
                      m_side_effects(std::move(side_effects)), m_reductions(std::move(reductions)), m_epoch_action(epoch_action),
                      m_fence_promise(std::move(fence_promise)) {
                        assert(type == task_type::host_compute || type == task_type::device_compute || get_granularity().size() == 1);
                        // Only host tasks can have side effects
                        assert(this->m_side_effects.empty() || type == task_type::host_compute || type == task_type::collective || type == task_type::master_node
                               || type == task_type::fence);
                }
        };
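
The static factory functions above are the only way to construct a task; each one fixes the task_type and fills in the corresponding constructor arguments. As a brief usage sketch (illustrative only, assuming task_id is constructible from an integer and that the header is included as "task.h"):

#include <cassert>

#include "task.h"

using namespace celerity::detail;

// Builds a horizon task via the factory above and inspects it through the getters.
void inspect_horizon_task() {
        const auto horizon = task::make_horizon(task_id(42));
        horizon->set_debug_name("example horizon");

        assert(horizon->get_id() == task_id(42));
        assert(horizon->get_type() == task_type::horizon);
        assert(horizon->get_execution_target() == execution_target::none);
        assert(horizon->get_buffer_access_map().get_num_accesses() == 0);
        assert(horizon->get_reductions().empty());
        assert(!horizon->has_variable_split());
}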

        [[nodiscard]] std::string print_task_debug_label(const task& tsk, bool title_case = false);

        /// Determines which overlapping regions appear between write accesses when the iteration space of `tsk` is split into `chunks`.
        std::unordered_map<buffer_id, region<3>> detect_overlapping_writes(const task& tsk, const box_vector<3>& chunks);

} // namespace detail
} // namespace celerity