• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 23053141451

13 Mar 2026 01:30PM UTC coverage: 63.617% (-0.1%) from 63.722%
23053141451

push

github

web-flow
Merge pull request #579 from daisytuner/SkipNestedTiling

Skip nested GPU tiling on the same container to avoid redefinitions o…

5 of 7 new or added lines in 2 files covered. (71.43%)

22 existing lines in 3 files now uncovered.

25200 of 39612 relevant lines covered (63.62%)

400.84 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

59.2
/sdfg/src/data_flow/library_nodes/math/tensor/matmul_node.cpp
1
#include "sdfg/data_flow/library_nodes/math/tensor/matmul_node.h"
2
#include <string>
3

4
#include "sdfg/analysis/scope_analysis.h"
5
#include "sdfg/builder/structured_sdfg_builder.h"
6
#include "sdfg/data_flow/library_nodes/math/blas/gemm_node.h"
7
#include "sdfg/data_flow/library_nodes/stdlib/free.h"
8
#include "sdfg/data_flow/library_nodes/stdlib/malloc.h"
9
#include "sdfg/data_flow/tasklet.h"
10
#include "sdfg/element.h"
11
#include "sdfg/structured_control_flow/control_flow_node.h"
12
#include "sdfg/structured_control_flow/map.h"
13
#include "sdfg/structured_control_flow/sequence.h"
14
#include "sdfg/symbolic/symbolic.h"
15
#include "sdfg/types/pointer.h"
16
#include "sdfg/types/scalar.h"
17
#include "sdfg/types/tensor.h"
18
#include "sdfg/types/type.h"
19
#include "sdfg/types/utils.h"
20

21
namespace sdfg {
22
namespace math {
23
namespace tensor {
24

25
MatMulNode::MatMulNode(
26
    size_t element_id,
27
    const DebugInfo& debug_info,
28
    const graph::Vertex vertex,
29
    data_flow::DataFlowGraph& parent,
30
    const symbolic::MultiExpression& shape_a,
31
    const symbolic::MultiExpression& shape_b,
32
    const symbolic::MultiExpression& strides_a,
33
    const symbolic::MultiExpression& strides_b,
34
    symbolic::Expression offset_a,
35
    symbolic::Expression offset_b
36
)
37
    : TensorNode(
5✔
38
          element_id,
5✔
39
          debug_info,
5✔
40
          vertex,
5✔
41
          parent,
5✔
42
          LibraryNodeType_MatMul,
5✔
43
          {"Y"},
5✔
44
          {"A", "B"},
5✔
45
          data_flow::ImplementationType_NONE
5✔
46
      ),
5✔
47
      shape_a_(shape_a), shape_b_(shape_b), strides_a_(strides_a), strides_b_(strides_b), offset_a_(offset_a),
5✔
48
      offset_b_(offset_b) {
5✔
49
    if (shape_a_.size() < 2) {
5✔
50
        throw std::invalid_argument("MatMulNode: Input A must have at least 2 dimensions");
×
51
    }
×
52
    if (shape_b_.size() < 2) {
5✔
53
        throw std::invalid_argument("MatMulNode: Input B must have at least 2 dimensions");
×
54
    }
×
55
    // Compute default row-major strides if not provided
56
    if (strides_a_.empty()) {
5✔
57
        strides_a_.resize(shape_a_.size());
5✔
58
        strides_a_[shape_a_.size() - 1] = symbolic::integer(1);
5✔
59
        for (int i = static_cast<int>(shape_a_.size()) - 2; i >= 0; --i) {
11✔
60
            strides_a_[i] = symbolic::mul(strides_a_[i + 1], shape_a_[i + 1]);
6✔
61
        }
6✔
62
    }
5✔
63
    if (strides_b_.empty()) {
5✔
64
        strides_b_.resize(shape_b_.size());
5✔
65
        strides_b_[shape_b_.size() - 1] = symbolic::integer(1);
5✔
66
        for (int i = static_cast<int>(shape_b_.size()) - 2; i >= 0; --i) {
11✔
67
            strides_b_[i] = symbolic::mul(strides_b_[i + 1], shape_b_[i + 1]);
6✔
68
        }
6✔
69
    }
5✔
70
}
5✔
71

72
symbolic::Expression MatMulNode::m() const {
    // M: row count of the matrix part of A (its second-to-last extent).
    const size_t m_dim = shape_a_.size() - 2;
    return shape_a_[m_dim];
}
76

77
symbolic::Expression MatMulNode::n() const {
    // N: column count of the matrix part of B (its last extent).
    const size_t n_dim = shape_b_.size() - 1;
    return shape_b_[n_dim];
}
81

82
symbolic::Expression MatMulNode::k() const {
    // K: contraction extent, taken from A's last dimension
    // (equal to B's second-to-last dimension, enforced by validate()).
    const size_t k_dim = shape_a_.size() - 1;
    return shape_a_[k_dim];
}
86

87
void MatMulNode::validate(const Function& function) const {
5✔
88
    TensorNode::validate(function);
5✔
89

90
    auto& graph = this->get_parent();
5✔
91

92
    // Check that we have exactly 2 inputs and 1 output
93
    if (graph.in_degree(*this) != 2) {
5✔
94
        throw InvalidSDFGException("MatMulNode: Expected exactly 2 inputs (A and B)");
×
95
    }
×
96
    if (graph.out_degree(*this) != 1) {
5✔
97
        throw InvalidSDFGException("MatMulNode: Expected exactly 1 output (Y)");
×
98
    }
×
99

100
    // Validate K dimension matches between A and B
101
    auto k_a = shape_a_[shape_a_.size() - 1];
5✔
102
    auto k_b = shape_b_[shape_b_.size() - 2];
5✔
103
    if (!symbolic::eq(k_a, k_b)) {
5✔
104
        throw InvalidSDFGException(
×
105
            "MatMulNode: K dimension mismatch. A has K=" + k_a->__str__() + ", B has K=" + k_b->__str__()
×
106
        );
×
107
    }
×
108
}
5✔
109

110
symbolic::SymbolSet MatMulNode::symbols() const {
    // Union of the free symbols occurring in shapes, strides, and offsets.
    symbolic::SymbolSet result;
    auto absorb = [&result](const symbolic::Expression& expr) {
        for (auto& atom : symbolic::atoms(expr)) {
            result.insert(atom);
        }
    };
    for (const auto& dim : shape_a_) {
        absorb(dim);
    }
    for (const auto& dim : shape_b_) {
        absorb(dim);
    }
    for (const auto& stride : strides_a_) {
        absorb(stride);
    }
    for (const auto& stride : strides_b_) {
        absorb(stride);
    }
    absorb(offset_a_);
    absorb(offset_b_);
    return result;
}
140

141
void MatMulNode::replace(const symbolic::Expression old_expression, const symbolic::Expression new_expression) {
    // Substitute old_expression -> new_expression in every symbolic attribute.
    auto rewrite = [&old_expression, &new_expression](symbolic::Expression& expr) {
        expr = symbolic::subs(expr, old_expression, new_expression);
    };
    for (auto& dim : shape_a_) {
        rewrite(dim);
    }
    for (auto& dim : shape_b_) {
        rewrite(dim);
    }
    for (auto& stride : strides_a_) {
        rewrite(stride);
    }
    for (auto& stride : strides_b_) {
        rewrite(stride);
    }
    rewrite(offset_a_);
    rewrite(offset_b_);
}
157

158
std::unique_ptr<data_flow::DataFlowNode> MatMulNode::
159
    clone(size_t element_id, const graph::Vertex vertex, data_flow::DataFlowGraph& parent) const {
×
160
    return std::unique_ptr<data_flow::DataFlowNode>(new MatMulNode(
×
161
        element_id, debug_info(), vertex, parent, shape_a_, shape_b_, strides_a_, strides_b_, offset_a_, offset_b_
×
162
    ));
×
163
}
×
164

165
std::string MatMulNode::toStr() const {
×
166
    std::stringstream ss;
×
167
    ss << "MatMul(";
×
168
    ss << "A=[";
×
169
    for (size_t i = 0; i < shape_a_.size(); ++i) {
×
170
        if (i > 0) ss << ", ";
×
171
        ss << shape_a_[i]->__str__();
×
172
    }
×
173
    ss << "], strides_a=[";
×
174
    for (size_t i = 0; i < strides_a_.size(); ++i) {
×
175
        if (i > 0) ss << ", ";
×
176
        ss << strides_a_[i]->__str__();
×
177
    }
×
178
    ss << "], offset_a=" << offset_a_->__str__();
×
179
    ss << ", B=[";
×
180
    for (size_t i = 0; i < shape_b_.size(); ++i) {
×
181
        if (i > 0) ss << ", ";
×
182
        ss << shape_b_[i]->__str__();
×
183
    }
×
184
    ss << "], strides_b=[";
×
185
    for (size_t i = 0; i < strides_b_.size(); ++i) {
×
186
        if (i > 0) ss << ", ";
×
187
        ss << strides_b_[i]->__str__();
×
188
    }
×
189
    ss << "], offset_b=" << offset_b_->__str__();
×
190
    ss << ")";
×
191
    return ss.str();
×
192
}
×
193

194
void free_after_copy(
195
    const std::string& copy_name, builder::StructuredSDFGBuilder& builder, structured_control_flow::Sequence& parent
UNCOV
196
) {
×
UNCOV
197
    auto& block = builder.add_block(parent, {}, DebugInfo());
×
UNCOV
198
    auto& access_in = builder.add_access(block, copy_name);
×
UNCOV
199
    auto& access_out = builder.add_access(block, copy_name);
×
UNCOV
200
    auto& free_node = builder.add_library_node<stdlib::FreeNode>(block, DebugInfo());
×
UNCOV
201
    builder.add_computational_memlet(
×
UNCOV
202
        block, access_in, free_node, "_ptr", {}, types::Pointer(types::Scalar(types::PrimitiveType::Void))
×
UNCOV
203
    );
×
UNCOV
204
    builder.add_computational_memlet(
×
UNCOV
205
        block, free_node, "_ptr", access_out, {}, types::Pointer(types::Scalar(types::PrimitiveType::Void))
×
UNCOV
206
    );
×
UNCOV
207
}
×
208

209
// Lower this MatMulNode into a (batched) BLAS GEMM call.
//
// The node's block is rewritten into: a nest of sequential maps over the batch
// dimensions, a block that computes per-iteration base pointers for A, B, and Y
// via reference memlets, and a block holding a GEMM library node
// (C = alpha*A*B + beta*C with alpha=1, beta=0). The original matmul block is
// then removed. Returns false (no change) when the node's connectivity or
// element type does not fit the GEMM lowering.
bool MatMulNode::expand(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& dataflow = this->get_parent();
    auto& block = static_cast<structured_control_flow::Block&>(*dataflow.get_parent());

    // Bail out unless the node has exactly the A/B inputs and the Y output.
    if (dataflow.in_degree(*this) != 2 || dataflow.out_degree(*this) != 1) {
        return false;
    }

    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
    auto& parent = static_cast<structured_control_flow::Sequence&>(*scope_analysis.parent_scope(&block));
    int index = parent.index(block);
    auto& transition = parent.at(index).second;

    // Get input and output edges
    data_flow::Memlet* iedge_a = nullptr;
    data_flow::Memlet* iedge_b = nullptr;
    for (auto& iedge : dataflow.in_edges(*this)) {
        if (iedge.dst_conn() == "A") {
            iedge_a = &iedge;
        } else if (iedge.dst_conn() == "B") {
            iedge_b = &iedge;
        }
    }
    auto& oedge = *dataflow.out_edges(*this).begin();

    if (!iedge_a || !iedge_b) {
        return false;
    }

    // Check if legal - access nodes must not have other connections
    auto& input_node_a = static_cast<data_flow::AccessNode&>(iedge_a->src());
    auto& input_node_b = static_cast<data_flow::AccessNode&>(iedge_b->src());
    auto& output_node = static_cast<data_flow::AccessNode&>(oedge.dst());

    if (dataflow.in_degree(input_node_a) != 0 || dataflow.in_degree(input_node_b) != 0 ||
        dataflow.out_degree(output_node) != 0) {
        return false;
    }

    // Determine BLAS precision from primitive type
    auto prim_type = this->primitive_type(dataflow);
    blas::BLAS_Precision precision;
    switch (prim_type) {
        case types::PrimitiveType::Half:
            precision = blas::BLAS_Precision::h;
            break;
        case types::PrimitiveType::Float:
            precision = blas::BLAS_Precision::s;
            break;
        case types::PrimitiveType::Double:
            precision = blas::BLAS_Precision::d;
            break;
        default:
            // GEMM only supports floating point types, fall back to naive expansion
            return false;
    };

    // Add new graph after the current block
    // NOTE(review): despite the comment, this uses add_sequence_before — the new
    // sequence is inserted before `block` and `block` is removed at the end via
    // remove_child(parent, index + 1). Verify the ordering semantics of
    // add_sequence_before against the builder API.
    auto& new_sequence = builder.add_sequence_before(parent, block, transition.assignments(), block.debug_info());

    // NOTE(review): this overwrites any caller-provided strides with dense
    // row-major strides derived from the shape — presumably the GEMM lowering
    // requires contiguous operands; confirm custom strides are handled upstream.
    auto copy_name_a = input_node_a.data();
    strides_a_ = types::Tensor::strides_from_shape(shape_a_);
    auto copy_name_b = input_node_b.data();
    strides_b_ = types::Tensor::strides_from_shape(shape_b_);

    // Create maps for batch dimensions and M, N dimensions
    structured_control_flow::Sequence* last_scope = &new_sequence;
    structured_control_flow::Map* last_map = nullptr;
    symbolic::MultiExpression batch_vars;

    // Compute batch dimensions (all except last 2)
    size_t batch_dims_a = shape_a_.size() - 2;
    size_t batch_dims_b = shape_b_.size() - 2;
    size_t max_batch_dims = std::max(batch_dims_a, batch_dims_b);

    // Create maps for batch dimensions (using broadcasting)
    for (size_t i = 0; i < max_batch_dims; ++i) {
        std::string indvar_str = builder.find_new_name("_b");
        builder.add_container(indvar_str, types::Scalar(types::PrimitiveType::UInt64));

        auto indvar = symbolic::symbol(indvar_str);
        auto init = symbolic::zero();
        auto update = symbolic::add(indvar, symbolic::one());

        // Determine the bound for this batch dimension (max of A and B for broadcasting)
        // Align batch dims to the right (numpy-style): a_idx/b_idx are SIZE_MAX
        // when the operand lacks this leading batch dimension.
        symbolic::Expression bound;
        size_t a_idx = batch_dims_a >= (max_batch_dims - i) ? i - (max_batch_dims - batch_dims_a) : SIZE_MAX;
        size_t b_idx = batch_dims_b >= (max_batch_dims - i) ? i - (max_batch_dims - batch_dims_b) : SIZE_MAX;

        if (a_idx != SIZE_MAX && b_idx != SIZE_MAX) {
            // Both have this dimension - they should be equal or one should be 1 (broadcasting)
            // NOTE(review): size-1 broadcast of A against a larger B dim would
            // produce the wrong bound here — confirm callers guarantee equality.
            bound = shape_a_[a_idx]; // Assume they match or broadcasting is handled
        } else if (a_idx != SIZE_MAX) {
            bound = shape_a_[a_idx];
        } else {
            bound = shape_b_[b_idx];
        }

        auto condition = symbolic::Lt(indvar, bound);
        last_map = &builder.add_map(
            *last_scope,
            indvar,
            condition,
            init,
            update,
            structured_control_flow::ScheduleType_Sequential::create(),
            {},
            block.debug_info()
        );
        last_scope = &last_map->root();
        batch_vars.push_back(indvar);
    }

    auto& ref_block = builder.add_block(*last_scope, {}, block.debug_info());

    auto scalar_type = types::Scalar(prim_type);

    // Compute offsets for this batch iteration
    // For A: base_offset_a = offset_a + sum_i(batch_idx_i * batch_stride_a_i)
    symbolic::Expression a_batch_offset = offset_a_;
    for (size_t i = 0; i < batch_dims_a; ++i) {
        size_t batch_idx = max_batch_dims - batch_dims_a + i;
        a_batch_offset = symbolic::add(a_batch_offset, symbolic::mul(batch_vars[batch_idx], strides_a_[i]));
    }

    // For B: base_offset_b = offset_b + sum_i(batch_idx_i * batch_stride_b_i)
    symbolic::Expression b_batch_offset = offset_b_;
    for (size_t i = 0; i < batch_dims_b; ++i) {
        size_t batch_idx = max_batch_dims - batch_dims_b + i;
        b_batch_offset = symbolic::add(b_batch_offset, symbolic::mul(batch_vars[batch_idx], strides_b_[i]));
    }

    // Compute output batch offset (same as batch_vars pattern for Y)
    symbolic::Expression c_batch_offset = symbolic::integer(0);
    for (size_t i = 0; i < batch_vars.size(); ++i) {
        // Output has shape [batch..., M, N] with row-major strides
        // Stride for batch dim i is: M * N * product of remaining batch dims
        symbolic::Expression c_stride = symbolic::mul(this->m(), this->n());
        for (size_t j = i + 1; j < batch_vars.size(); ++j) {
            // Multiply by subsequent batch dimensions
            // NOTE(review): the index arithmetic below mixes the broadcast-aligned
            // batch index j with operand-local shape indices; it looks suspect when
            // batch_dims_a != batch_dims_b. This inner loop is uncovered by tests —
            // verify against a case with differing batch ranks.
            if (j < batch_dims_a) {
                c_stride = symbolic::mul(c_stride, shape_a_[j]);
            } else if (j - batch_dims_a < batch_dims_b) {
                c_stride = symbolic::mul(c_stride, shape_b_[j - batch_dims_a]);
            }
        }
        c_batch_offset = symbolic::add(c_batch_offset, symbolic::mul(batch_vars[i], c_stride));
    }

    // Create access nodes
    auto& a_access = builder.add_access(ref_block, copy_name_a, debug_info());
    auto& b_access = builder.add_access(ref_block, copy_name_b, debug_info());
    auto& c_access_in = builder.add_access(ref_block, output_node.data(), debug_info());

    // Fresh void* containers that hold the per-iteration base addresses of A, B, Y.
    std::string ref_name_a = builder.find_new_name(copy_name_a + "_ref");
    builder.add_container(ref_name_a, types::Pointer(types::Scalar(types::PrimitiveType::Void)));
    auto& a_access_ref = builder.add_access(ref_block, ref_name_a, debug_info());
    std::string ref_name_b = builder.find_new_name(copy_name_b + "_ref");
    builder.add_container(ref_name_b, types::Pointer(types::Scalar(types::PrimitiveType::Void)));
    auto& b_access_ref = builder.add_access(ref_block, ref_name_b, debug_info());
    std::string ref_name_c = builder.find_new_name(output_node.data() + "_ref");
    builder.add_container(ref_name_c, types::Pointer(types::Scalar(types::PrimitiveType::Void)));
    auto& c_access_ref_in = builder.add_access(ref_block, ref_name_c, debug_info());

    // ref = &data[batch_offset] for each operand.
    builder.add_reference_memlet(
        ref_block, a_access, a_access_ref, {a_batch_offset}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );
    builder.add_reference_memlet(
        ref_block, b_access, b_access_ref, {b_batch_offset}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );
    builder.add_reference_memlet(
        ref_block, c_access_in, c_access_ref_in, {c_batch_offset}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );

    // Create block with GEMM library node
    auto& gemm_block = builder.add_block(*last_scope, {}, block.debug_info());

    // Leading dimensions: stride of the row dimension (second-to-last dim)
    // For row-major A[M, K]: lda = stride for M dimension = strides_a_[-2]
    // For row-major B[K, N]: ldb = stride for K dimension = strides_b_[-2]
    auto lda = strides_a_[strides_a_.size() - 2];
    auto ldb = strides_b_[strides_b_.size() - 2];
    // For output C[M, N] in row-major: ldc = N
    auto ldc = this->n();

    // Add GEMM node: C = alpha * A * B + beta * C
    // With alpha = 1.0, beta = 0.0: C = A * B
    auto& gemm_node = builder.add_library_node<blas::GEMMNode>(
        gemm_block,
        debug_info(),
        blas::ImplementationType_BLAS,
        precision,
        blas::BLAS_Layout::RowMajor,
        blas::BLAS_Transpose::No, // trans_a
        blas::BLAS_Transpose::No, // trans_b
        this->m(),
        this->n(),
        this->k(),
        lda,
        ldb,
        ldc
    );

    auto& a_access_ref_in_gemm = builder.add_access(gemm_block, ref_name_a, debug_info());
    auto& b_access_ref_in_gemm = builder.add_access(gemm_block, ref_name_b, debug_info());
    auto& c_access_ref_in_gemm = builder.add_access(gemm_block, ref_name_c, debug_info());

    auto& c_access_ref_out = builder.add_access(gemm_block, ref_name_c, debug_info());

    // Create alpha and beta constants
    auto& alpha_const = builder.add_constant(gemm_block, "1.0", scalar_type, debug_info());
    auto& beta_const = builder.add_constant(gemm_block, "0.0", scalar_type, debug_info());

    // Connect memlets with batch offsets
    // Input A with offset
    builder.add_computational_memlet(
        gemm_block, a_access_ref_in_gemm, gemm_node, "__A", {}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );
    // Input B with offset
    builder.add_computational_memlet(
        gemm_block, b_access_ref_in_gemm, gemm_node, "__B", {}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );
    // Input C (for beta * C, but beta=0 so just needs to be connected)
    builder.add_computational_memlet(
        gemm_block, c_access_ref_in_gemm, gemm_node, "__C", {}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );
    // Alpha constant
    builder.add_computational_memlet(gemm_block, alpha_const, gemm_node, "__alpha", {}, scalar_type, debug_info());
    // Beta constant
    builder.add_computational_memlet(gemm_block, beta_const, gemm_node, "__beta", {}, scalar_type, debug_info());
    // Output C
    builder.add_computational_memlet(
        gemm_block, gemm_node, "__C", c_access_ref_out, {}, ::sdfg::types::Pointer(scalar_type), debug_info()
    );

    // Free copies if we made them
    // NOTE(review): copy_name_a/b are assigned directly from the input access
    // nodes above and never changed, so these branches are currently dead
    // (consistent with them being uncovered) — possibly a remnant of a
    // copy-to-contiguous step that was removed.
    if (copy_name_a != input_node_a.data()) {
        free_after_copy(copy_name_a, builder, new_sequence);
    }
    if (copy_name_b != input_node_b.data()) {
        free_after_copy(copy_name_b, builder, new_sequence);
    }

    // Remove the original nodes
    builder.remove_memlet(block, *iedge_a);
    builder.remove_memlet(block, *iedge_b);
    builder.remove_memlet(block, oedge);
    if (&input_node_a != &input_node_b) {
        builder.remove_node(block, input_node_a);
    }
    builder.remove_node(block, input_node_b);
    builder.remove_node(block, output_node);
    builder.remove_node(block, *this);
    builder.remove_child(parent, index + 1);

    return true;
}
466

467
nlohmann::json MatMulNodeSerializer::serialize(const data_flow::LibraryNode& library_node) {
    // Encode the node's code id plus all symbolic attributes
    // (shapes, strides, offsets) as JSON, expressions rendered as strings.
    const MatMulNode& node = static_cast<const MatMulNode&>(library_node);

    serializer::JSONSerializer expr_serializer;
    auto to_array = [&expr_serializer](const symbolic::MultiExpression& exprs) {
        nlohmann::json arr = nlohmann::json::array();
        for (auto& expr : exprs) {
            arr.push_back(expr_serializer.expression(expr));
        }
        return arr;
    };

    nlohmann::json j;
    j["code"] = node.code().value();
    j["shape_a"] = to_array(node.shape_a());
    j["shape_b"] = to_array(node.shape_b());
    j["strides_a"] = to_array(node.strides_a());
    j["strides_b"] = to_array(node.strides_b());
    j["offset_a"] = expr_serializer.expression(node.offset_a());
    j["offset_b"] = expr_serializer.expression(node.offset_b());

    return j;
}
500

501
data_flow::LibraryNode& MatMulNodeSerializer::deserialize(
    const nlohmann::json& j, builder::StructuredSDFGBuilder& builder, structured_control_flow::Block& parent
) {
    // Mandatory fields; strides and offsets are optional (see below).
    assert(j.contains("element_id"));
    assert(j.contains("code"));
    assert(j.contains("debug_info"));
    assert(j.contains("shape_a"));
    assert(j.contains("shape_b"));

    // Parse a JSON array of expression strings into a MultiExpression.
    auto parse_list = [](const nlohmann::json& arr) {
        symbolic::MultiExpression exprs;
        for (const auto& entry : arr) {
            exprs.push_back(symbolic::parse(entry.get<std::string>()));
        }
        return exprs;
    };

    symbolic::MultiExpression shape_a = parse_list(j["shape_a"]);
    symbolic::MultiExpression shape_b = parse_list(j["shape_b"]);

    // Missing strides stay empty; the MatMulNode constructor then derives
    // row-major defaults from the shapes.
    symbolic::MultiExpression strides_a;
    if (j.contains("strides_a")) {
        strides_a = parse_list(j["strides_a"]);
    }
    symbolic::MultiExpression strides_b;
    if (j.contains("strides_b")) {
        strides_b = parse_list(j["strides_b"]);
    }

    // Offsets default to zero when absent.
    symbolic::Expression offset_a = symbolic::integer(0);
    if (j.contains("offset_a")) {
        offset_a = symbolic::parse(j["offset_a"].get<std::string>());
    }
    symbolic::Expression offset_b = symbolic::integer(0);
    if (j.contains("offset_b")) {
        offset_b = symbolic::parse(j["offset_b"].get<std::string>());
    }

    sdfg::serializer::JSONSerializer serializer;
    DebugInfo debug_info = serializer.json_to_debug_info(j["debug_info"]);

    return builder
        .add_library_node<MatMulNode>(parent, debug_info, shape_a, shape_b, strides_a, strides_b, offset_a, offset_b);
}
550

551
} // namespace tensor
552
} // namespace math
553
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc