• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 24128050488

08 Apr 2026 09:23AM UTC coverage: 64.832% (-0.02%) from 64.848%
24128050488

push

github

web-flow
Batchnorm Node (#655)

* Batchnorm Node:

 + symbolic flop estimation
 ~ fixed: expand was missing the division operation.
 ~ ensure clone also respects impl type

* Added batchnorm2d test with explicit input values, to tease out the last bug.

 + toStr() for tensor layout gives layout details

10 of 29 new or added lines in 2 files covered. (34.48%)

1 existing line in 1 file now uncovered.

28998 of 44728 relevant lines covered (64.83%)

603.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

12.7
/sdfg/src/data_flow/library_nodes/math/tensor/tensor_layout.cpp
1
#include "sdfg/data_flow/library_nodes/math/tensor/tensor_layout.h"
2

3
#include "sdfg/serializer/json_serializer.h"
4

5
namespace sdfg::math::tensor {
6

7
/// Builds a tensor layout from shape, strides and offset expressions.
/// If the stride list is empty, contiguous (row-major) strides are
/// derived from the shape via linear_strides().
TensorLayout::TensorLayout(
    const symbolic::MultiExpression& shape, const symbolic::MultiExpression& strides, const symbolic::Expression offset
)
    : shape_(shape), strides_(strides), offset_(offset) {
    // An empty stride list means "use the default contiguous layout".
    if (strides_.empty()) {
        strides_ = linear_strides();
    }
}
15

16
/// Serializes shape, strides and offset into the given JSON object as
/// string-encoded symbolic expressions (keys: "shape", "strides", "offset").
void TensorLayout::serialize_to_json(nlohmann::json& j) const {
    sdfg::serializer::JSONSerializer serializer;

    // Helper: render a list of symbolic expressions as a JSON array.
    auto to_array = [&serializer](const symbolic::MultiExpression& exprs) {
        nlohmann::json arr = nlohmann::json::array();
        for (const auto& expr : exprs) {
            arr.push_back(serializer.expression(expr));
        }
        return arr;
    };

    j["shape"] = to_array(shape_);
    j["strides"] = to_array(strides_);
    j["offset"] = serializer.expression(offset_);
}
33

34
std::string TensorLayout::toStr() const {
×
35
    std::stringstream ss;
×
NEW
36
    ss << "TLayout(shape=[";
×
NEW
37
    for (auto& s : shape_) {
×
NEW
38
        ss << s->__str__() << ",";
×
NEW
39
    }
×
NEW
40
    ss << "], strides=[";
×
NEW
41
    for (auto& s : strides_) {
×
NEW
42
        ss << s->__str__() << ",";
×
NEW
43
    }
×
NEW
44
    ss << "])";
×
45
    return ss.str();
×
46
}
×
47

48
/// Computes contiguous row-major strides for the given shape: the innermost
/// dimension has stride 1, and each outer stride is the product of all
/// inner extents. An empty shape yields an empty stride vector.
symbolic::MultiExpression TensorLayout::linear_strides(const symbolic::MultiExpression& shape) {
    symbolic::MultiExpression lin_strides;
    const std::size_t dims = shape.size();
    if (dims == 0) {
        // Guard: without this, `lin_strides[dims - 1]` below wraps to
        // SIZE_MAX and indexes out of bounds (undefined behavior).
        return lin_strides;
    }
    lin_strides.resize(dims);
    lin_strides[dims - 1] = symbolic::integer(1);
    for (int i = static_cast<int>(dims) - 2; i >= 0; --i) {
        lin_strides[i] = symbolic::mul(lin_strides.at(i + 1), shape.at(i + 1));
    }
    // Plain return of a local enables NRVO; `return std::move(...)`
    // would inhibit copy elision (clang-tidy: pessimizing-move).
    return lin_strides;
}
59
/// Inserts every symbol occurring in the shape, stride and offset
/// expressions of this layout into the given symbol set.
void TensorLayout::collect_symbols(symbolic::SymbolSet& syms) const {
    // Helper: add all atoms of a single expression to the set.
    auto collect = [&syms](const symbolic::Expression& expr) {
        for (auto& atom : symbolic::atoms(expr)) {
            syms.insert(atom);
        }
    };

    for (const auto& dim : shape_) {
        collect(dim);
    }
    for (const auto& stride : strides_) {
        collect(stride);
    }
    collect(offset_);
}
74

75
/// Substitutes `old` with `new_expr` in every shape, stride and offset
/// expression of this layout (in place).
void TensorLayout::replace_symbols(const symbolic::Expression& old, const symbolic::Expression& new_expr) {
    // Helper: apply the substitution to one expression in place.
    auto substitute = [&old, &new_expr](symbolic::Expression& expr) { expr = symbolic::subs(expr, old, new_expr); };

    for (auto& dim : shape_) {
        substitute(dim);
    }
    for (auto& stride : strides_) {
        substitute(stride);
    }
    substitute(offset_);
}
84

85
/// Convenience overload: contiguous strides for this layout's own shape.
// Fixed: `return std::move(temporary)` inhibits copy elision on the
// returned prvalue (clang-tidy: pessimizing-move); return it directly.
symbolic::MultiExpression TensorLayout::linear_strides() const { return linear_strides(shape_); }
86

87
/// Reconstructs a TensorLayout from JSON produced by serialize_to_json().
/// Expects "shape" and "strides" as arrays of expression strings and
/// "offset" as a single expression string.
TensorLayout TensorLayout::deserialize_from_json(const nlohmann::json& j) {
    symbolic::MultiExpression shape;
    for (const auto& dim : j["shape"]) {
        shape.push_back(symbolic::parse(dim.get<std::string>()));
    }

    symbolic::MultiExpression strides;
    for (const auto& stride : j["strides"]) {
        strides.push_back(symbolic::parse(stride.get<std::string>()));
    }

    symbolic::Expression offset = symbolic::parse(j["offset"].get<std::string>());

    // Fixed: `return std::move(prvalue)` inhibits guaranteed copy elision
    // (clang-tidy: pessimizing-move); return the temporary directly.
    return TensorLayout(shape, strides, offset);
}
102

103
std::ostream& operator<<(std::ostream& stream, const TensorLayout& layout) {
×
104
    stream << "{shape[";
×
105
    for (size_t i = 0; i < layout.shape().size(); ++i) {
×
106
        if (i > 0) stream << ", ";
×
107
        stream << layout.shape().at(i)->__str__();
×
108
    }
×
109
    stream << "], strides=[";
×
110
    for (size_t i = 0; i < layout.strides().size(); ++i) {
×
111
        if (i > 0) stream << ", ";
×
112
        stream << layout.strides().at(i)->__str__();
×
113
    }
×
114
    stream << "]";
×
115
    if (SymEngine::neq(*layout.offset(), *symbolic::integer(0))) {
×
116
        stream << ", off=" << layout.offset()->__str__();
×
117
    }
×
118
    stream << "}";
×
119

120
    return stream;
×
121
}
×
122

123
/// Checks whether `strides` are exactly the contiguous (row-major,
/// no-padding) strides implied by `shape`, comparing symbolically.
bool TensorLayout::has_linear_accesses_no_padding(symbolic::MultiExpression shape, symbolic::MultiExpression strides) {
    // Expected contiguous strides for this shape.
    const auto expected = types::Tensor::strides_from_shape(shape);
    if (expected.size() != strides.size()) {
        return false;
    }
    // Every stride must symbolically equal its contiguous counterpart.
    size_t idx = 0;
    for (const auto& expected_stride : expected) {
        if (!symbolic::eq(expected_stride, strides.at(idx))) {
            return false;
        }
        ++idx;
    }
    return true;
}
135

136
/// Convenience overload: checks this layout's own shape and strides.
bool TensorLayout::has_linear_accesses_no_padding() const {
    return has_linear_accesses_no_padding(shape_, strides_);
}
137

138
bool TensorLayout::has_transposed_strides_no_padding() const {
×
139
    if (shape_.size() < 2) {
×
140
        return false;
×
141
    }
×
142
    symbolic::MultiExpression new_shape;
×
143
    new_shape.reserve(shape_.size());
×
144
    for (size_t i = 0; i < shape_.size() - 2; i++) {
×
145
        new_shape.push_back(shape_.at(i));
×
146
    }
×
147
    new_shape.push_back(shape_.at(shape_.size() - 1));
×
148
    new_shape.push_back(shape_.at(shape_.size() - 2));
×
149
    symbolic::MultiExpression transposed_strides(strides_);
×
150
    transposed_strides[strides_.size() - 2] = strides_.at(strides_.size() - 1);
×
151
    transposed_strides[strides_.size() - 1] = strides_.at(strides_.size() - 2);
×
152
    return TensorLayout::has_linear_accesses_no_padding(new_shape, transposed_strides);
×
153
}
×
154

155
} // namespace sdfg::math::tensor
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc