
daisytuner / sdfglib / build 20764569418 (push, github, web-flow)

06 Jan 2026 10:50PM UTC coverage: 62.168% (+21.4%) from 40.764%
14988 of 24109 relevant lines covered (62.17%), 88.57 hits per line

Merge pull request #433 from daisytuner/clang-coverage: updates clang coverage flags
Source File: /src/data_flow/library_nodes/math/tensor/reduce_node.cpp (90.86% covered)
#include "sdfg/data_flow/library_nodes/math/tensor/reduce_node.h"

#include "sdfg/analysis/analysis.h"
#include "sdfg/analysis/scope_analysis.h"
#include "sdfg/builder/structured_sdfg_builder.h"
#include "sdfg/types/array.h"
#include "sdfg/types/pointer.h"

#include <algorithm>

namespace sdfg {
namespace math {
namespace tensor {

ReduceNode::ReduceNode(
    size_t element_id,
    const DebugInfo& debug_info,
    const graph::Vertex vertex,
    data_flow::DataFlowGraph& parent,
    const data_flow::LibraryNodeCode& code,
    const std::vector<symbolic::Expression>& shape,
    const std::vector<int64_t>& axes,
    bool keepdims
)
    : TensorNode(element_id, debug_info, vertex, parent, code, {"Y"}, {"X"}, data_flow::ImplementationType_NONE),
      shape_(shape), axes_(axes), keepdims_(keepdims) {}

symbolic::SymbolSet ReduceNode::symbols() const {
    symbolic::SymbolSet syms;
    for (const auto& dim : shape_) {
        for (auto& atom : symbolic::atoms(dim)) {
            syms.insert(atom);
        }
    }
    return syms;
}

void ReduceNode::replace(const symbolic::Expression old_expression, const symbolic::Expression new_expression) {
    for (auto& dim : shape_) {
        dim = symbolic::subs(dim, old_expression, new_expression);
    }
}

bool ReduceNode::expand(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& dataflow = this->get_parent();
    auto& block = static_cast<structured_control_flow::Block&>(*dataflow.get_parent());

    if (dataflow.in_degree(*this) != 1 || dataflow.out_degree(*this) != 1) {
        return false;
    }

    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
    auto& parent = static_cast<structured_control_flow::Sequence&>(*scope_analysis.parent_scope(&block));
    int index = parent.index(block);
    auto& transition = parent.at(index).second;

    auto& iedge = *dataflow.in_edges(*this).begin();
    auto& oedge = *dataflow.out_edges(*this).begin();

    auto& input_node = static_cast<data_flow::AccessNode&>(iedge.src());
    auto& output_node = static_cast<data_flow::AccessNode&>(oedge.dst());

    if (dataflow.in_degree(input_node) != 0 || dataflow.out_degree(output_node) != 0) {
        return false;
    }

    // Calculate output shape
    std::vector<symbolic::Expression> output_shape;
    std::vector<int64_t> sorted_axes = axes_;
    std::sort(sorted_axes.begin(), sorted_axes.end());

    for (size_t i = 0; i < shape_.size(); ++i) {
        bool is_axis = false;
        for (auto axis : sorted_axes) {
            if (axis == (int64_t) i) {
                is_axis = true;
                break;
            }
        }

        if (is_axis) {
            if (keepdims_) {
                output_shape.push_back(symbolic::one());
            }
        } else {
            output_shape.push_back(shape_[i]);
        }
    }

    sdfg::types::Scalar element_type(oedge.base_type().primitive_type());

    // Add new sequence
    auto& new_sequence = builder.add_sequence_before(parent, block, transition.assignments(), block.debug_info());

    // 1. Initialization Loop
    {
        symbolic::Expression init_expr = symbolic::zero();
        structured_control_flow::Sequence* last_scope = &new_sequence;
        structured_control_flow::Map* last_map = nullptr;

        for (size_t i = 0; i < output_shape.size(); ++i) {
            std::string indvar_str = builder.find_new_name("_i_init");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, output_shape[i]);

            last_map = &builder.add_map(
                *last_scope,
                indvar,
                condition,
                init,
                update,
                structured_control_flow::ScheduleType_Sequential::create(),
                {},
                block.debug_info()
            );
            last_scope = &last_map->root();
            init_expr = symbolic::add(symbolic::mul(init_expr, output_shape[i]), indvar);
        }
        data_flow::Subset init_subset;
        if (!output_shape.empty()) {
            init_subset.push_back(init_expr);
        }

        // Add initialization tasklet
        auto& init_block = builder.add_block(*last_scope, {}, block.debug_info());
        auto& init_tasklet =
            builder.add_tasklet(init_block, data_flow::TaskletCode::assign, {"_out"}, {"_in"}, block.debug_info());

        auto& const_node = builder.add_constant(init_block, this->identity(), element_type, block.debug_info());
        auto& out_access = builder.add_access(init_block, output_node.data(), block.debug_info());

        builder
            .add_computational_memlet(init_block, const_node, init_tasklet, "_in", {}, element_type, block.debug_info());
        builder.add_computational_memlet(
            init_block, init_tasklet, "_out", out_access, init_subset, oedge.base_type(), block.debug_info()
        );
    }

    // 2. Reduction Loop
    {
        data_flow::Subset input_subset;
        data_flow::Subset output_subset;

        structured_control_flow::Sequence* last_scope = &new_sequence;
        structured_control_flow::StructuredLoop* last_loop = nullptr;

        std::map<size_t, symbolic::Expression> loop_vars_map;
        std::vector<size_t> outer_dims;
        std::vector<size_t> inner_dims;

        // Partition dimensions
        for (size_t i = 0; i < shape_.size(); ++i) {
            bool is_axis = false;
            for (auto axis : sorted_axes) {
                if (axis == (int64_t) i) {
                    is_axis = true;
                    break;
                }
            }
            if (is_axis) {
                inner_dims.push_back(i);
            } else {
                outer_dims.push_back(i);
            }
        }

        // Generate outer parallel loops (Maps)
        for (size_t dim_idx : outer_dims) {
            std::string indvar_str = builder.find_new_name("_i");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, shape_[dim_idx]);

            auto& map = builder.add_map(
                *last_scope,
                indvar,
                condition,
                init,
                update,
                structured_control_flow::ScheduleType_Sequential::create(),
                {},
                block.debug_info()
            );
            last_scope = &map.root();
            loop_vars_map[dim_idx] = indvar;
        }

        // Generate inner sequential loops (Fors)
        for (size_t dim_idx : inner_dims) {
            std::string indvar_str = builder.find_new_name("_k");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, shape_[dim_idx]);

            last_loop = &builder.add_for(*last_scope, indvar, condition, init, update, {}, block.debug_info());
            last_scope = &last_loop->root();
            loop_vars_map[dim_idx] = indvar;
        }

        // Linearize input
        symbolic::Expression input_linear_index = symbolic::zero();
        for (size_t i = 0; i < shape_.size(); ++i) {
            input_linear_index = symbolic::add(symbolic::mul(input_linear_index, shape_[i]), loop_vars_map[i]);
        }
        if (!shape_.empty()) {
            input_subset.push_back(input_linear_index);
        }

        // Construct output indices
        std::vector<symbolic::Expression> output_indices;
        for (size_t i = 0; i < shape_.size(); ++i) {
            bool is_axis = false;
            for (auto axis : sorted_axes) {
                if (axis == (int64_t) i) {
                    is_axis = true;
                    break;
                }
            }

            if (is_axis) {
                if (keepdims_) {
                    output_indices.push_back(symbolic::zero());
                }
            } else {
                output_indices.push_back(loop_vars_map[i]);
            }
        }

        // Linearize output
        symbolic::Expression output_linear_index = symbolic::zero();
        for (size_t i = 0; i < output_shape.size(); ++i) {
            output_linear_index = symbolic::add(symbolic::mul(output_linear_index, output_shape[i]), output_indices[i]);
        }
        if (!output_shape.empty()) {
            output_subset.push_back(output_linear_index);
        }

        this->expand_reduction(
            builder,
            analysis_manager,
            *last_scope,
            input_node.data(),
            output_node.data(),
            iedge.base_type(),
            oedge.base_type(),
            input_subset,
            output_subset
        );
    }

    // Clean up block
    builder.remove_memlet(block, iedge);
    builder.remove_memlet(block, oedge);
    builder.remove_node(block, input_node);
    builder.remove_node(block, output_node);
    builder.remove_node(block, *this);
    builder.remove_child(parent, index + 1);

    return true;
}

} // namespace tensor
} // namespace math
} // namespace sdfg
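The expand() method above builds its index expressions symbolically. As a quick illustration of the same convention, the standalone sketch below mirrors the row-major linearization and the keepdims/axes output-shape rule with plain int64_t values. The helper names (linearize, reduced_shape) are hypothetical and not part of sdfglib; this is only a worked example of the indexing scheme, not library code.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Row-major linearization, matching the symbolic loop in expand():
// index = ((i0 * s1 + i1) * s2 + i2) * ...
static int64_t linearize(const std::vector<int64_t>& shape, const std::vector<int64_t>& idx) {
    int64_t linear = 0;
    for (size_t i = 0; i < shape.size(); ++i) {
        linear = linear * shape[i] + idx[i];
    }
    return linear;
}

// Output shape of a reduction: reduced axes are dropped, or kept as extent 1 if keepdims.
static std::vector<int64_t> reduced_shape(
    const std::vector<int64_t>& shape, const std::vector<int64_t>& axes, bool keepdims
) {
    std::vector<int64_t> out;
    for (size_t i = 0; i < shape.size(); ++i) {
        bool is_axis = std::find(axes.begin(), axes.end(), (int64_t) i) != axes.end();
        if (is_axis) {
            if (keepdims) out.push_back(1);
        } else {
            out.push_back(shape[i]);
        }
    }
    return out;
}

int main() {
    // Reduce a 2x3 tensor over axis 1 without keepdims: output shape {2}.
    std::vector<int64_t> shape = {2, 3};
    auto out_shape = reduced_shape(shape, {1}, /*keepdims=*/false);

    // Element (1, 2) of the input reads linear index 1*3 + 2 = 5 and
    // accumulates into linear output index 1.
    std::cout << linearize(shape, {1, 2}) << " -> " << linearize(out_shape, {1}) << "\n";
    return 0;
}

Under these assumptions the program prints "5 -> 1", which corresponds to the input and output subsets that expand() would attach to the memlets for that loop iteration.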