daisytuner / sdfglib / 20561412006

29 Dec 2025 12:03AM UTC coverage: 40.369% (+1.4%) from 38.976%

Pull Request #409: restructures library nodes
Merge d082d4d04 into eec4d6f74

14299 of 45900 branches covered (31.15%)

Branch coverage included in aggregate %.

259 of 388 new or added lines in 19 files covered (66.75%)

27 existing lines in 1 file now uncovered.

12248 of 19861 relevant lines covered (61.67%)

89.05 hits per line
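
Since branch coverage is folded into the aggregate percentage, the 40.369% above can be reproduced from the line and branch totals reported here. A minimal standalone sketch of that arithmetic (assuming the aggregate is simply (covered lines + covered branches) / (relevant lines + total branches), which matches the reported figures):

#include <cstdio>

int main() {
    // Assumption: aggregate % = (covered lines + covered branches) / (relevant lines + total branches).
    const double covered = 12248.0 + 14299.0; // covered lines + covered branches
    const double total = 19861.0 + 45900.0;   // relevant lines + total branches
    std::printf("aggregate: %.3f%%\n", 100.0 * covered / total);          // 40.369%
    std::printf("line coverage: %.2f%%\n", 100.0 * 12248.0 / 19861.0);    // 61.67%
    std::printf("branch coverage: %.2f%%\n", 100.0 * 14299.0 / 45900.0);  // 31.15%
    return 0;
}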

Source File

/src/data_flow/library_nodes/math/tensor/reduce_node.cpp: 65.01%
#include "sdfg/data_flow/library_nodes/math/tensor/reduce_node.h"

#include "sdfg/analysis/analysis.h"
#include "sdfg/analysis/scope_analysis.h"
#include "sdfg/builder/structured_sdfg_builder.h"
#include "sdfg/types/array.h"
#include "sdfg/types/pointer.h"

#include <algorithm>

namespace sdfg {
namespace math {
namespace tensor {

ReduceNode::ReduceNode(
    size_t element_id,
    const DebugInfo& debug_info,
    const graph::Vertex vertex,
    data_flow::DataFlowGraph& parent,
    const data_flow::LibraryNodeCode& code,
    const std::vector<symbolic::Expression>& shape,
    const std::vector<int64_t>& axes,
    bool keepdims
)
    : MathNode(element_id, debug_info, vertex, parent, code, {"Y"}, {"X"}, data_flow::ImplementationType_NONE),
      shape_(shape), axes_(axes), keepdims_(keepdims) {}

symbolic::SymbolSet ReduceNode::symbols() const {
    symbolic::SymbolSet syms;
    for (const auto& dim : shape_) {
        for (auto& atom : symbolic::atoms(dim)) {
            syms.insert(atom);
        }
    }
    return syms;
}

void ReduceNode::replace(const symbolic::Expression old_expression, const symbolic::Expression new_expression) {
    for (auto& dim : shape_) {
        dim = symbolic::subs(dim, old_expression, new_expression);
    }
}

void ReduceNode::validate(const Function& function) const {}

bool ReduceNode::expand(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& dataflow = this->get_parent();
    auto& block = static_cast<structured_control_flow::Block&>(*dataflow.get_parent());

    if (dataflow.in_degree(*this) != 1 || dataflow.out_degree(*this) != 1) {
        return false;
    }

    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
    auto& parent = static_cast<structured_control_flow::Sequence&>(*scope_analysis.parent_scope(&block));
    int index = parent.index(block);
    auto& transition = parent.at(index).second;

    auto& iedge = *dataflow.in_edges(*this).begin();
    auto& oedge = *dataflow.out_edges(*this).begin();

    auto& input_node = static_cast<data_flow::AccessNode&>(iedge.src());
    auto& output_node = static_cast<data_flow::AccessNode&>(oedge.dst());

    if (dataflow.in_degree(input_node) != 0 || dataflow.out_degree(output_node) != 0) {
        return false;
    }

    // Calculate output shape
    std::vector<symbolic::Expression> output_shape;
    std::vector<int64_t> sorted_axes = axes_;
    std::sort(sorted_axes.begin(), sorted_axes.end());

    for (size_t i = 0; i < shape_.size(); ++i) {
        bool is_axis = false;
        for (auto axis : sorted_axes) {
            if (axis == (int64_t) i) {
                is_axis = true;
                break;
            }
        }

        if (is_axis) {
            if (keepdims_) {
                output_shape.push_back(symbolic::one());
            }
        } else {
            output_shape.push_back(shape_[i]);
        }
    }

    sdfg::types::Scalar element_type(sdfg::types::PrimitiveType::Float);

    // Add new sequence
    auto& new_sequence = builder.add_sequence_before(parent, block, transition.assignments(), block.debug_info());

    // 1. Initialization Loop
    {
        symbolic::Expression init_expr = symbolic::zero();
        structured_control_flow::Sequence* last_scope = &new_sequence;
        structured_control_flow::Map* last_map = nullptr;

        for (size_t i = 0; i < output_shape.size(); ++i) {
            std::string indvar_str = builder.find_new_name("_i_init");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, output_shape[i]);

            last_map = &builder.add_map(
                *last_scope,
                indvar,
                condition,
                init,
                update,
                structured_control_flow::ScheduleType_Sequential::create(),
                {},
                block.debug_info()
            );
            last_scope = &last_map->root();
            init_expr = symbolic::add(symbolic::mul(init_expr, output_shape[i]), indvar);
        }
        data_flow::Subset init_subset;
        if (!output_shape.empty()) {
            init_subset.push_back(init_expr);
        }

        // Add initialization tasklet
        auto& init_block = builder.add_block(*last_scope, {}, block.debug_info());
        auto& init_tasklet =
            builder.add_tasklet(init_block, data_flow::TaskletCode::assign, {"_out"}, {"_in"}, block.debug_info());

        auto& const_node = builder.add_constant(init_block, this->identity(), element_type, block.debug_info());
        auto& out_access = builder.add_access(init_block, output_node.data(), block.debug_info());

        builder
            .add_computational_memlet(init_block, const_node, init_tasklet, "_in", {}, element_type, block.debug_info());
        builder.add_computational_memlet(
            init_block, init_tasklet, "_out", out_access, init_subset, oedge.base_type(), block.debug_info()
        );
    }

    // 2. Reduction Loop
    {
        data_flow::Subset input_subset;
        data_flow::Subset output_subset;

        structured_control_flow::Sequence* last_scope = &new_sequence;
        structured_control_flow::StructuredLoop* last_loop = nullptr;

        std::map<size_t, symbolic::Expression> loop_vars_map;
        std::vector<size_t> outer_dims;
        std::vector<size_t> inner_dims;

        // Partition dimensions
        for (size_t i = 0; i < shape_.size(); ++i) {
            bool is_axis = false;
            for (auto axis : sorted_axes) {
                if (axis == (int64_t) i) {
                    is_axis = true;
                    break;
                }
            }
            if (is_axis) {
                inner_dims.push_back(i);
            } else {
                outer_dims.push_back(i);
            }
        }

        // Generate outer parallel loops (Maps)
        for (size_t dim_idx : outer_dims) {
            std::string indvar_str = builder.find_new_name("_i");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, shape_[dim_idx]);

            auto& map = builder.add_map(
                *last_scope,
                indvar,
                condition,
                init,
                update,
                structured_control_flow::ScheduleType_Sequential::create(),
                {},
                block.debug_info()
            );
            last_scope = &map.root();
            loop_vars_map[dim_idx] = indvar;
        }

        // Generate inner sequential loops (Fors)
        for (size_t dim_idx : inner_dims) {
            std::string indvar_str = builder.find_new_name("_k");
            builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::Int64));

            auto indvar = symbolic::symbol(indvar_str);
            auto init = symbolic::zero();
            auto update = symbolic::add(indvar, symbolic::one());
            auto condition = symbolic::Lt(indvar, shape_[dim_idx]);

            last_loop = &builder.add_for(*last_scope, indvar, condition, init, update, {}, block.debug_info());
            last_scope = &last_loop->root();
            loop_vars_map[dim_idx] = indvar;
        }

        // Linearize input
        symbolic::Expression input_linear_index = symbolic::zero();
        for (size_t i = 0; i < shape_.size(); ++i) {
            input_linear_index = symbolic::add(symbolic::mul(input_linear_index, shape_[i]), loop_vars_map[i]);
        }
        if (!shape_.empty()) {
            input_subset.push_back(input_linear_index);
        }

        // Construct output indices
        std::vector<symbolic::Expression> output_indices;
        for (size_t i = 0; i < shape_.size(); ++i) {
            bool is_axis = false;
            for (auto axis : sorted_axes) {
                if (axis == (int64_t) i) {
                    is_axis = true;
                    break;
                }
            }

            if (is_axis) {
                if (keepdims_) {
                    output_indices.push_back(symbolic::zero());
                }
            } else {
                output_indices.push_back(loop_vars_map[i]);
            }
        }

        // Linearize output
        symbolic::Expression output_linear_index = symbolic::zero();
        for (size_t i = 0; i < output_shape.size(); ++i) {
            output_linear_index = symbolic::add(symbolic::mul(output_linear_index, output_shape[i]), output_indices[i]);
        }
        if (!output_shape.empty()) {
            output_subset.push_back(output_linear_index);
        }

        this->expand_reduction(
            builder,
            analysis_manager,
            *last_scope,
            input_node.data(),
            output_node.data(),
            iedge.base_type(),
            oedge.base_type(),
            input_subset,
            output_subset
        );
    }

    // Clean up block
    builder.remove_memlet(block, iedge);
    builder.remove_memlet(block, oedge);
    builder.remove_node(block, input_node);
    builder.remove_node(block, output_node);
    builder.remove_node(block, *this);
    builder.remove_child(parent, index + 1);

    return true;
}

} // namespace tensor
} // namespace math
} // namespace sdfg
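
For reference, the subset expressions built symbolically in expand() follow standard row-major linearization, and the output shape drops each reduced axis (or pins it to 1 when keepdims is set). Below is a minimal standalone sketch of that arithmetic on concrete integers; the helper names linearize and reduced_shape are illustrative and not part of the sdfglib API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Row-major linearization, as used for the input and output subsets above:
// index = ((i0 * d1 + i1) * d2 + i2) * ...
int64_t linearize(const std::vector<int64_t>& shape, const std::vector<int64_t>& idx) {
    int64_t linear = 0;
    for (size_t i = 0; i < shape.size(); ++i) {
        linear = linear * shape[i] + idx[i];
    }
    return linear;
}

// Output shape of a reduction: reduced axes become 1 (keepdims) or are dropped.
std::vector<int64_t> reduced_shape(
    const std::vector<int64_t>& shape, const std::vector<int64_t>& axes, bool keepdims
) {
    std::vector<int64_t> out;
    for (size_t i = 0; i < shape.size(); ++i) {
        bool is_axis = false;
        for (auto axis : axes) {
            if (axis == (int64_t) i) {
                is_axis = true;
                break;
            }
        }
        if (is_axis) {
            if (keepdims) out.push_back(1);
        } else {
            out.push_back(shape[i]);
        }
    }
    return out;
}

int main() {
    std::vector<int64_t> shape = {2, 3, 4};
    // Reducing over axis 1 with keepdims=false yields shape {2, 4}.
    auto out = reduced_shape(shape, {1}, false);
    std::printf("output rank: %zu\n", out.size());             // 2
    std::printf("linear index of (1, 2, 3): %lld\n",
                (long long) linearize(shape, {1, 2, 3}));       // (1*3 + 2)*4 + 3 = 23
    return 0;
}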