• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

celerity / celerity-runtime / 10216674169

02 Aug 2024 01:45PM UTC coverage: 94.951% (+2.1%) from 92.884%
10216674169

push

github

fknorr
Remove experimental::user_benchmarker

user_benchmarker has been obsolete ever since we moved away from
structured logging as the profiler (CPAT) interface.

2978 of 3372 branches covered (88.32%)

Branch coverage included in aggregate %.

6557 of 6670 relevant lines covered (98.31%)

1534446.4 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.85
/include/task.h
1
#pragma once
2

3
#include <memory>
4
#include <unordered_map>
5
#include <unordered_set>
6
#include <utility>
7
#include <vector>
8

9
#include "grid.h"
10
#include "hint.h"
11
#include "intrusive_graph.h"
12
#include "launcher.h"
13
#include "range_mapper.h"
14
#include "reduction.h"
15
#include "types.h"
16

17

18
namespace celerity {
19

20
class handler;
21

22
namespace detail {
23

24
        class buffer_access_map {
25
          public:
26
                void add_access(buffer_id bid, std::unique_ptr<range_mapper_base>&& rm) { m_accesses.emplace_back(bid, std::move(rm)); }
3,654✔
27

28
                std::unordered_set<buffer_id> get_accessed_buffers() const;
29
                std::unordered_set<cl::sycl::access::mode> get_access_modes(buffer_id bid) const;
30
                size_t get_num_accesses() const { return m_accesses.size(); }
26,822✔
31
                std::pair<buffer_id, access_mode> get_nth_access(const size_t n) const {
2,858✔
32
                        const auto& [bid, rm] = m_accesses[n];
2,858✔
33
                        return {bid, rm->get_access_mode()};
2,858✔
34
                }
35

36
                /**
37
                 * @brief Computes the combined access-region for a given buffer, mode and subrange.
38
                 *
39
                 * @param bid
40
                 * @param mode
41
                 * @param sr The subrange to be passed to the range mappers (extended to a chunk using the global size of the task)
42
                 *
43
                 * @returns The region obtained by merging the results of all range-mappers for this buffer and mode
44
                 */
45
                region<3> get_mode_requirements(
46
                    const buffer_id bid, const access_mode mode, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;
47

48
                box<3> get_requirements_for_nth_access(const size_t n, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;
49

50
                std::vector<const range_mapper_base*> get_range_mappers(const buffer_id bid) const {
51
                        std::vector<const range_mapper_base*> rms;
52
                        for(const auto& [a_bid, a_rm] : m_accesses) {
53
                                if(a_bid == bid) { rms.push_back(a_rm.get()); }
54
                        }
55
                        return rms;
56
                }
57

58
                box_vector<3> get_required_contiguous_boxes(const buffer_id bid, const int kernel_dims, const subrange<3>& sr, const range<3>& global_size) const;
59

60
          private:
61
                std::vector<std::pair<buffer_id, std::unique_ptr<range_mapper_base>>> m_accesses;
62
        };
63

64
        /// The set of reductions attached to a single task (see task::get_reductions).
        using reduction_set = std::vector<reduction_info>;
65

66
        class side_effect_map : private std::unordered_map<host_object_id, experimental::side_effect_order> {
67
          private:
68
                using map_base = std::unordered_map<host_object_id, experimental::side_effect_order>;
69

70
          public:
71
                using typename map_base::const_iterator, map_base::value_type, map_base::key_type, map_base::mapped_type, map_base::const_reference,
72
                    map_base::const_pointer;
73
                using iterator = const_iterator;
74
                using reference = const_reference;
75
                using pointer = const_pointer;
76

77
                using map_base::size, map_base::count, map_base::empty, map_base::cbegin, map_base::cend, map_base::at;
78

79
                iterator begin() const { return cbegin(); }
5,946✔
80
                iterator end() const { return cend(); }
5,927✔
81
                iterator find(host_object_id key) const { return map_base::find(key); }
82

83
                void add_side_effect(host_object_id hoid, experimental::side_effect_order order);
84
        };
85

86
        class fence_promise {
87
          public:
88
                fence_promise() = default;
69✔
89
                fence_promise(const fence_promise&) = delete;
90
                fence_promise& operator=(const fence_promise&) = delete;
91
                virtual ~fence_promise() = default;
69✔
92

93
                virtual void fulfill() = 0;
94
                virtual allocation_id get_user_allocation_id() = 0; // TODO move to struct task instead
95
        };
96

97
        /// Describes the iteration space of a task.
        /// NOTE: this is an aggregate — member order and defaults are part of the interface
        /// (callers aggregate-initialize it, e.g. in task::make_collective).
        struct task_geometry {
                int dimensions = 0;            // 0 for tasks without a kernel iteration space (epochs, horizons, ...)
                range<3> global_size{1, 1, 1}; // extent of the iteration space
                id<3> global_offset{};         // offset of the iteration space
                range<3> granularity{1, 1, 1}; // minimum split granularity; must have size 1 for non-compute tasks (asserted in the task ctor)
        };
103

104
        /// A node in the task graph: one epoch, horizon, fence, or (host/device/collective/master-node)
        /// command group, together with everything the schedulers need to know about it.
        /// Instances are created exclusively through the make_* factories below.
        class task : public intrusive_graph_node<task> {
          public:
                task_type get_type() const { return m_type; }

                task_id get_id() const { return m_tid; }

                /// Returns non_collective_group_id for all tasks except those built via make_collective.
                collective_group_id get_collective_group_id() const { return m_cgid; }

                const buffer_access_map& get_buffer_access_map() const { return m_access_map; }

                const side_effect_map& get_side_effect_map() const { return m_side_effects; }

                const task_geometry& get_geometry() const { return m_geometry; }

                // Convenience accessors forwarding to the geometry.
                int get_dimensions() const { return m_geometry.dimensions; }

                range<3> get_global_size() const { return m_geometry.global_size; }

                id<3> get_global_offset() const { return m_geometry.global_offset; }

                range<3> get_granularity() const { return m_geometry.granularity; }

                void set_debug_name(const std::string& debug_name) { m_debug_name = debug_name; }
                const std::string& get_debug_name() const { return m_debug_name; }

                /// True iff the task's iteration space may be split into chunks (only compute tasks qualify).
                bool has_variable_split() const { return m_type == task_type::host_compute || m_type == task_type::device_compute; }

                /// Maps the task type onto where (if anywhere) it executes user code.
                execution_target get_execution_target() const {
                        switch(m_type) {
                        case task_type::epoch: return execution_target::none;
                        case task_type::device_compute: return execution_target::device;
                        case task_type::host_compute:
                        case task_type::collective:
                        case task_type::master_node: return execution_target::host;
                        case task_type::horizon:
                        case task_type::fence: return execution_target::none;
                        default: assert(!"Unhandled task type"); return execution_target::none;
                        }
                }

                const reduction_set& get_reductions() const { return m_reductions; }

                /// Only meaningful for epoch tasks; other factories pass a default-constructed action.
                epoch_action get_epoch_action() const { return m_epoch_action; }

                /// Non-owning; nullptr for every task type except fence (see make_fence).
                fence_promise* get_fence_promise() const { return m_fence_promise.get(); }

                /// Extracts the launcher of the given alternative from the stored command_group_launcher.
                /// NOTE(review): presumably throws std::bad_variant_access if Launcher does not match the
                /// stored alternative — depends on command_group_launcher being a std::variant (launcher.h).
                template <typename Launcher>
                Launcher get_launcher() const {
                        return std::get<Launcher>(m_launcher);
                }

                void add_hint(std::unique_ptr<hint_base>&& h) { m_hints.emplace_back(std::move(h)); }

                /// Returns the first hint of dynamic type Hint added via add_hint, or nullptr if none exists.
                template <typename Hint>
                const Hint* get_hint() const {
                        static_assert(std::is_base_of_v<hint_base, Hint>, "Hint must extend hint_base");
                        for(auto& h : m_hints) {
                                if(auto* ptr = dynamic_cast<Hint*>(h.get()); ptr != nullptr) { return ptr; }
                        }
                        return nullptr;
                }

                // The factories below are the only way to construct a task; each fills in the private
                // ctor's ten parameters appropriate for its task type. `new` instead of make_unique
                // because the constructor is private.

                static std::unique_ptr<task> make_epoch(task_id tid, detail::epoch_action action) {
                        return std::unique_ptr<task>(new task(tid, task_type::epoch, non_collective_group_id, task_geometry{}, {}, {}, {}, {}, action, nullptr));
                }

                static std::unique_ptr<task> make_host_compute(task_id tid, task_geometry geometry, host_task_launcher launcher, buffer_access_map access_map,
                    side_effect_map side_effect_map, reduction_set reductions) {
                        return std::unique_ptr<task>(new task(tid, task_type::host_compute, non_collective_group_id, geometry, std::move(launcher), std::move(access_map),
                            std::move(side_effect_map), std::move(reductions), {}, nullptr));
                }

                static std::unique_ptr<task> make_device_compute(
                    task_id tid, task_geometry geometry, device_kernel_launcher launcher, buffer_access_map access_map, reduction_set reductions) {
                        return std::unique_ptr<task>(new task(tid, task_type::device_compute, non_collective_group_id, geometry, std::move(launcher), std::move(access_map),
                            {}, std::move(reductions), {}, nullptr));
                }

                /// Collective tasks get a fixed 1-dimensional geometry with one item per participating node.
                static std::unique_ptr<task> make_collective(task_id tid, collective_group_id cgid, size_t num_collective_nodes, host_task_launcher launcher,
                    buffer_access_map access_map, side_effect_map side_effect_map) {
                        const task_geometry geometry{1, detail::range_cast<3>(range(num_collective_nodes)), {}, {1, 1, 1}};
                        return std::unique_ptr<task>(
                            new task(tid, task_type::collective, cgid, geometry, std::move(launcher), std::move(access_map), std::move(side_effect_map), {}, {}, nullptr));
                }

                static std::unique_ptr<task> make_master_node(task_id tid, host_task_launcher launcher, buffer_access_map access_map, side_effect_map side_effect_map) {
                        return std::unique_ptr<task>(new task(tid, task_type::master_node, non_collective_group_id, task_geometry{}, std::move(launcher),
                            std::move(access_map), std::move(side_effect_map), {}, {}, nullptr));
                }

                static std::unique_ptr<task> make_horizon(task_id tid) {
                        return std::unique_ptr<task>(new task(tid, task_type::horizon, non_collective_group_id, task_geometry{}, {}, {}, {}, {}, {}, nullptr));
                }

                /// The only factory that attaches a fence_promise (task takes ownership).
                static std::unique_ptr<task> make_fence(
                    task_id tid, buffer_access_map access_map, side_effect_map side_effect_map, std::unique_ptr<fence_promise> fence_promise) {
                        return std::unique_ptr<task>(new task(tid, task_type::fence, non_collective_group_id, task_geometry{}, {}, std::move(access_map),
                            std::move(side_effect_map), {}, {}, std::move(fence_promise)));
                }

          private:
                task_id m_tid;
                task_type m_type;
                collective_group_id m_cgid;
                task_geometry m_geometry;
                command_group_launcher m_launcher;
                buffer_access_map m_access_map;
                detail::side_effect_map m_side_effects;
                reduction_set m_reductions;
                std::string m_debug_name;
                detail::epoch_action m_epoch_action;
                // TODO I believe that `struct task` should not store command_group_launchers, fence_promise or other state that is related to execution instead of
                // abstract DAG building. For user-initialized buffers we already notify the runtime -> executor of this state directly. Maybe also do that for these.
                std::unique_ptr<fence_promise> m_fence_promise;
                std::vector<std::unique_ptr<hint_base>> m_hints;

                /// Private; invariants on the parameter combinations are maintained by the make_* factories
                /// and double-checked by the asserts below.
                task(task_id tid, task_type type, collective_group_id cgid, task_geometry geometry, command_group_launcher launcher, buffer_access_map access_map,
                    detail::side_effect_map side_effects, reduction_set reductions, detail::epoch_action epoch_action, std::unique_ptr<fence_promise> fence_promise)
                    : m_tid(tid), m_type(type), m_cgid(cgid), m_geometry(geometry), m_launcher(std::move(launcher)), m_access_map(std::move(access_map)),
                      m_side_effects(std::move(side_effects)), m_reductions(std::move(reductions)), m_epoch_action(epoch_action),
                      m_fence_promise(std::move(fence_promise)) {
                        // Only compute tasks may have a non-trivial split granularity.
                        assert(type == task_type::host_compute || type == task_type::device_compute || get_granularity().size() == 1);
                        // Only host tasks can have side effects
                        assert(this->m_side_effects.empty() || type == task_type::host_compute || type == task_type::collective || type == task_type::master_node
                               || type == task_type::fence);
                }
        };
231

232
        /// Builds a short human-readable label identifying `tsk` for diagnostics; `title_case`
        /// presumably capitalizes the leading word — confirm against the definition in the .cc file.
        [[nodiscard]] std::string print_task_debug_label(const task& tsk, bool title_case = false);

        /// Determines which overlapping regions appear between write accesses when the iteration space of `tsk` is split into `chunks`.
        std::unordered_map<buffer_id, region<3>> detect_overlapping_writes(const task& tsk, const box_vector<3>& chunks);
236

237
} // namespace detail
238
} // namespace celerity
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc