• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 25316236512

04 May 2026 11:24AM UTC coverage: 64.448% (+0.1%) from 64.317%
25316236512

push

github

web-flow
Merge pull request #699 from daisytuner/memory-tile-groups

Extends memory layout analysis to support groups of memlets

306 of 355 new or added lines in 7 files covered. (86.2%)

2 existing lines in 1 file now uncovered.

31949 of 49573 relevant lines covered (64.45%)

2301.3 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.43
/opt/src/transformations/out_local_storage.cpp
1
#include "sdfg/transformations/out_local_storage.h"
2

3
#include <cstddef>
4
#include <functional>
5
#include <string>
6

7
#include "sdfg/analysis/memory_layout_analysis.h"
8
#include "sdfg/analysis/scope_analysis.h"
9
#include "sdfg/analysis/users.h"
10
#include "sdfg/builder/structured_sdfg_builder.h"
11
#include "sdfg/data_flow/access_node.h"
12
#include "sdfg/data_flow/library_nodes/barrier_local_node.h"
13
#include "sdfg/data_flow/memlet.h"
14
#include "sdfg/passes/structured_control_flow/dead_cfg_elimination.h"
15
#include "sdfg/passes/structured_control_flow/sequence_fusion.h"
16
#include "sdfg/structured_control_flow/if_else.h"
17
#include "sdfg/structured_control_flow/sequence.h"
18
#include "sdfg/structured_control_flow/structured_loop.h"
19
#include "sdfg/symbolic/symbolic.h"
20
#include "sdfg/targets/gpu/gpu_schedule_type.h"
21
#include "sdfg/types/array.h"
22
#include "sdfg/types/pointer.h"
23
#include "sdfg/types/scalar.h"
24

25
namespace sdfg {
26
namespace transformations {
27

28
/// @brief Constructs an out-local-storage transformation for one container.
///
/// Captures the loop around which local storage is created, the access node
/// whose container is localized (the container name is taken from the node),
/// and the storage type of the local buffer (e.g. NV shared memory).
///
/// @param loop         Loop whose body's accesses are redirected to the local buffer.
/// @param access_node  Access node identifying the container to localize.
/// @param storage_type Storage type used for the local buffer in apply().
OutLocalStorage::OutLocalStorage(
    structured_control_flow::StructuredLoop& loop,
    const data_flow::AccessNode& access_node,
    const types::StorageType& storage_type
)
    : loop_(loop), access_node_(access_node), container_(access_node.data()), storage_type_(storage_type) {}
34

35
/// @brief Returns the transformation's identifier ("OutLocalStorage").
std::string OutLocalStorage::name() const { return "OutLocalStorage"; }
36

37
/// @brief Checks whether local storage can be created for `container_` around `loop_`.
///
/// Side effect: populates `tile_info_` (dimensions/bases/strides/offset/has_read)
/// and `group_memlets_`, which apply() consumes — so apply() must only run after
/// a successful can_be_applied() on the same analysis state.
///
/// @param builder           Builder owning the SDFG being transformed.
/// @param analysis_manager  Provides Users, MemoryLayoutAnalysis and ScopeAnalysis.
/// @return true iff all applicability criteria below hold.
bool OutLocalStorage::can_be_applied(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& sdfg = builder.subject();
    auto& body = this->loop_.root();

    // Reset cached tile information; it is (re)populated below.
    tile_info_ = TileInfo{};

    // Criterion: Container must exist
    if (!sdfg.exists(this->container_)) {
        return false;
    }

    auto& type = sdfg.type(this->container_);

    // Criterion: Container must be used in the loop body
    auto& users = analysis_manager.get<analysis::Users>();
    analysis::UsersView body_users(users, body);
    if (body_users.uses(this->container_).empty()) {
        return false;
    }

    // Criterion: Container must have writes (this is OutLocalStorage, not InLocalStorage)
    if (body_users.writes(this->container_).empty()) {
        return false;
    }

    // Determine if container is also read (read-write vs write-only).
    // apply() only emits the copy-in ("init") phase when this is true.
    tile_info_.has_read = !body_users.reads(this->container_).empty();

    // Handle scalar containers: no tile needed, dimensions stay empty
    if (type.type_id() == types::TypeID::Scalar) {
        return true;
    }

    // For Array/Pointer types: use MemoryLayoutAnalysis tile group API
    if (type.type_id() != types::TypeID::Pointer && type.type_id() != types::TypeID::Array) {
        return false;
    }

    auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();

    // Find a representative memlet from the access node to identify its group.
    // An incoming edge (write to the node) is preferred; fall back to an
    // outgoing edge (read from the node).
    const data_flow::Memlet* representative_memlet = nullptr;
    auto& dfg = access_node_.get_parent();
    for (auto& memlet : dfg.in_edges(access_node_)) {
        representative_memlet = &memlet;
        break;
    }
    if (!representative_memlet) {
        for (auto& memlet : dfg.out_edges(access_node_)) {
            representative_memlet = &memlet;
            break;
        }
    }
    if (!representative_memlet) {
        // Access node with no memlets at all: nothing to localize.
        return false;
    }

    auto* group = mla.tile_group_for(loop_, *representative_memlet);
    if (!group) {
        return false;
    }

    auto& tile = group->tile;

    // Store group memlets for use in apply()
    group_memlets_.clear();
    group_memlets_.insert(group->memlets.begin(), group->memlets.end());

    // Get overapproximated extents (integer upper bounds)
    auto extents = tile.extents_approx();
    if (extents.empty()) {
        return false;
    }

    // Store tile info (before substitution, bases/strides stay symbolic)
    tile_info_.dimensions = extents;
    tile_info_.bases = tile.min_subset;
    tile_info_.strides = std::vector<symbolic::Expression>(tile.layout.strides().begin(), tile.layout.strides().end());
    tile_info_.offset = tile.layout.offset();

    // GPU shared memory: resolve symbolic extents using GPU block sizes and
    // require at least one cooperative dimension
    if (storage_type_.is_nv_shared()) {
        auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
        auto ancestors = scope_analysis.ancestor_scopes(&loop_);

        // Build substitution map: symbolic GPU map bounds → integer block sizes
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                auto block_size = gpu::gpu_block_size(ancestor_map->schedule_type());
                // Extract symbolic bound from condition: Lt(indvar, BOUND)
                auto condition = ancestor_map->condition();
                if (SymEngine::is_a<SymEngine::StrictLessThan>(*condition)) {
                    auto stl = SymEngine::rcp_static_cast<const SymEngine::StrictLessThan>(condition);
                    // StrictLessThan args are [lhs, rhs]; rhs is the loop bound.
                    auto rhs = stl->get_args()[1];
                    // iter_count = bound - init; only substituted when it is not
                    // already a concrete integer.
                    auto iter_count = symbolic::sub(rhs, ancestor_map->init());
                    if (!SymEngine::is_a<SymEngine::Integer>(*iter_count)) {
                        // Symbolic bound — substitute with block size in extents and bases
                        for (auto& ext : tile_info_.dimensions) {
                            ext = symbolic::simplify(symbolic::subs(ext, iter_count, block_size));
                        }
                        for (auto& base : tile_info_.bases) {
                            base = symbolic::simplify(symbolic::subs(base, iter_count, block_size));
                        }
                    }
                }
            }
        }

        // Criterion: All extents must now be provably integer
        // (shared-memory buffers need a compile-time size).
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }

        // Criterion: At least one cooperative dimension — a GPU map whose
        // indvar does not appear in any tile base, i.e. threads of that map
        // share the same tile and can cooperate on the copy in/out.
        bool has_cooperative_dim = false;
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                bool appears_in_bases = false;
                for (auto& base : tile_info_.bases) {
                    if (symbolic::uses(base, ancestor_map->indvar())) {
                        appears_in_bases = true;
                        break;
                    }
                }
                if (!appears_in_bases) {
                    has_cooperative_dim = true;
                    break;
                }
            }
        }
        if (!has_cooperative_dim) {
            return false;
        }
    } else {
        // CPU path: All extents must be provably integer
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }
    }

    return true;
}
190

191
void OutLocalStorage::apply(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
18✔
192
    auto& sdfg = builder.subject();
18✔
193
    auto& users = analysis_manager.get<analysis::Users>();
18✔
194
    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
18✔
195

196
    auto parent_node = scope_analysis.parent_scope(&loop_);
18✔
197
    auto parent = dynamic_cast<structured_control_flow::Sequence*>(parent_node);
18✔
198
    if (!parent) {
18✔
199
        throw InvalidSDFGException("OutLocalStorage: Parent of loop must be a Sequence!");
×
200
    }
×
201

202
    // Get type information
203
    auto& type = sdfg.type(this->container_);
18✔
204
    types::Scalar scalar_type(type.primitive_type());
18✔
205

206
    // Create local buffer name
207
    local_name_ = builder.find_new_name("__daisy_out_local_storage_" + this->container_);
18✔
208

209
    // ========================================================================
210
    // SCALAR PATH: tile_info_.dimensions is empty
211
    // ========================================================================
212
    if (tile_info_.dimensions.empty()) {
18✔
213
        // Create scalar local buffer
214
        builder.add_container(local_name_, scalar_type);
1✔
215

216
        // Get the access subset from the first user (all scalar, so empty subset)
217
        analysis::UsersView body_users(users, loop_.root());
1✔
218
        auto accesses = body_users.uses(this->container_);
1✔
219
        auto first_access = accesses.at(0);
1✔
220
        auto first_subset = first_access->subsets().at(0);
1✔
221

222
        // Init block (copy from container to local) - before loop
223
        if (tile_info_.has_read) {
1✔
224
            auto& init_block = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
1✔
225
            auto& init_src = builder.add_access(init_block, this->container_);
1✔
226
            auto& init_dst = builder.add_access(init_block, local_name_);
1✔
227
            auto& init_tasklet = builder.add_tasklet(init_block, data_flow::TaskletCode::assign, "_out", {"_in"});
1✔
228
            builder.add_computational_memlet(init_block, init_src, init_tasklet, "_in", first_subset, type);
1✔
229
            builder.add_computational_memlet(init_block, init_tasklet, "_out", init_dst, {}, scalar_type);
1✔
230
        }
1✔
231

232
        // Writeback block (copy from local to container) - after loop
233
        {
1✔
234
            auto& wb_block = builder.add_block_after(*parent, loop_, {}, loop_.debug_info());
1✔
235
            auto& wb_src = builder.add_access(wb_block, local_name_);
1✔
236
            auto& wb_dst = builder.add_access(wb_block, this->container_);
1✔
237
            auto& wb_tasklet = builder.add_tasklet(wb_block, data_flow::TaskletCode::assign, "_out", {"_in"});
1✔
238
            builder.add_computational_memlet(wb_block, wb_src, wb_tasklet, "_in", {}, scalar_type);
1✔
239
            builder.add_computational_memlet(wb_block, wb_tasklet, "_out", wb_dst, first_subset, type);
1✔
240
        }
1✔
241

242
        // Rewrite body accesses to use scalar local
243
        for (auto* user : body_users.uses(this->container_)) {
2✔
244
            auto element = user->element();
2✔
245
            if (auto access = dynamic_cast<data_flow::AccessNode*>(element)) {
2✔
246
                for (auto& iedge : access->get_parent().in_edges(*access)) {
2✔
247
                    auto memlet = &iedge;
1✔
248
                    memlet->set_subset({});
1✔
249
                    memlet->set_base_type(scalar_type);
1✔
250
                }
1✔
251
                for (auto& oedge : access->get_parent().out_edges(*access)) {
2✔
252
                    auto memlet = &oedge;
1✔
253
                    memlet->set_subset({});
1✔
254
                    memlet->set_base_type(scalar_type);
1✔
255
                }
1✔
256
            }
2✔
257
        }
2✔
258

259
        // Replace container name in the loop body
260
        loop_.replace(symbolic::symbol(this->container_), symbolic::symbol(local_name_));
1✔
261
    }
1✔
262
    // ========================================================================
263
    // ARRAY PATH: tile_info_.dimensions is non-empty
264
    // ========================================================================
265
    else {
17✔
266
        // Collect varying dimensions (extent > 1) and compute buffer layout
267
        std::vector<size_t> varying_dims;
17✔
268
        std::vector<symbolic::Expression> dim_sizes;
17✔
269
        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
44✔
270
            auto& dim_size = tile_info_.dimensions.at(d);
27✔
271
            if (!symbolic::eq(dim_size, symbolic::integer(1))) {
27✔
272
                varying_dims.push_back(d);
16✔
273
                dim_sizes.push_back(dim_size);
16✔
274
            }
16✔
275
        }
27✔
276

277
        // Compute total buffer size
278
        symbolic::Expression total_size = symbolic::integer(1);
17✔
279
        for (auto& ds : dim_sizes) {
17✔
280
            total_size = symbolic::mul(total_size, ds);
16✔
281
        }
16✔
282

283
        // Create the local buffer with specified storage type
284
        types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
17✔
285
        builder.add_container(local_name_, buffer_type);
17✔
286

287
        // Helper: build linearized local index from per-dimension expressions
288
        auto linearize_exprs = [&](const std::vector<symbolic::Expression>& indices) -> symbolic::Expression {
53✔
289
            symbolic::Expression linear_idx = symbolic::integer(0);
53✔
290
            symbolic::Expression stride = symbolic::integer(1);
53✔
291
            for (int i = indices.size() - 1; i >= 0; i--) {
100✔
292
                linear_idx = symbolic::add(linear_idx, symbolic::mul(indices[i], stride));
47✔
293
                stride = symbolic::mul(stride, dim_sizes[i]);
47✔
294
            }
47✔
295
            return linear_idx;
53✔
296
        };
53✔
297

298
        // Helper: build linearized local index from per-dimension indvars (symbols)
299
        auto linearize = [&](const std::vector<symbolic::Symbol>& indvars) -> symbolic::Expression {
23✔
300
            std::vector<symbolic::Expression> exprs(indvars.begin(), indvars.end());
23✔
301
            return linearize_exprs(exprs);
23✔
302
        };
23✔
303

304
        // Helper: build source subset (base[d] + copy_indvar[d]) for original container
305
        bool is_pointer = (type.type_id() == types::TypeID::Pointer);
17✔
306
        auto build_original_subset = [&](const std::vector<symbolic::Expression>& copy_indices) -> data_flow::Subset {
28✔
307
            std::vector<symbolic::Expression> full_indices;
28✔
308
            size_t var_idx = 0;
28✔
309
            for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
73✔
310
                if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
45✔
311
                    full_indices.push_back(symbolic::add(tile_info_.bases.at(d), copy_indices.at(var_idx++)));
26✔
312
                } else {
26✔
313
                    full_indices.push_back(tile_info_.bases.at(d));
19✔
314
                }
19✔
315
            }
45✔
316

317
            if (is_pointer) {
28✔
318
                symbolic::Expression linear = tile_info_.offset;
28✔
319
                for (size_t d = 0; d < full_indices.size(); d++) {
73✔
320
                    linear = symbolic::add(linear, symbolic::mul(tile_info_.strides.at(d), full_indices.at(d)));
45✔
321
                }
45✔
322
                return {linear};
28✔
323
            } else {
28✔
324
                return data_flow::Subset(full_indices.begin(), full_indices.end());
×
325
            }
×
326
        };
28✔
327

328
        if (storage_type_.is_nv_shared()) {
17✔
329
            // ============================================================
330
            // GPU COOPERATIVE PATH
331
            // ============================================================
332
            auto ancestors = scope_analysis.ancestor_scopes(&loop_);
4✔
333

334
            // Collect cooperative GPU dimensions
335
            struct CoopDim {
4✔
336
                symbolic::Symbol indvar;
4✔
337
                symbolic::Integer block_size;
4✔
338
                gpu::GPUDimension dimension;
4✔
339
            };
4✔
340
            std::vector<CoopDim> coop_dims;
4✔
341

342
            for (auto* node : ancestors) {
20✔
343
                if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
20✔
344
                    if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
8✔
345
                        continue;
×
346
                    }
×
347
                    bool appears_in_bases = false;
8✔
348
                    for (auto& base : tile_info_.bases) {
11✔
349
                        if (symbolic::uses(base, ancestor_map->indvar())) {
11✔
350
                            appears_in_bases = true;
3✔
351
                            break;
3✔
352
                        }
3✔
353
                    }
11✔
354
                    if (!appears_in_bases) {
8✔
355
                        coop_dims.push_back(
5✔
356
                            {ancestor_map->indvar(),
5✔
357
                             gpu::gpu_block_size(ancestor_map->schedule_type()),
5✔
358
                             gpu::gpu_dimension(ancestor_map->schedule_type())}
5✔
359
                        );
5✔
360
                    }
5✔
361
                }
8✔
362
            }
20✔
363

364
            // Compute total cooperative thread count
365
            symbolic::Expression total_coop_threads = symbolic::integer(1);
4✔
366
            for (auto& cd : coop_dims) {
5✔
367
                total_coop_threads = symbolic::mul(total_coop_threads, cd.block_size);
5✔
368
            }
5✔
369

370
            // Flatten cooperative thread index
371
            symbolic::Expression coop_flat = symbolic::integer(0);
4✔
372
            symbolic::Expression coop_stride = symbolic::integer(1);
4✔
373
            for (int i = coop_dims.size() - 1; i >= 0; i--) {
9✔
374
                coop_flat = symbolic::add(coop_flat, symbolic::mul(coop_dims[i].indvar, coop_stride));
5✔
375
                coop_stride = symbolic::mul(coop_stride, coop_dims[i].block_size);
5✔
376
            }
5✔
377

378
            // INIT: barrier → cooperative copy-in → barrier (if has_read)
379
            if (tile_info_.has_read) {
4✔
380
                // Barrier before init
381
                auto& barrier_block1 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
1✔
382
                builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block1, {});
1✔
383

384
                // Cooperative copy-in loop
385
                auto idx_name = builder.find_new_name("__daisy_ols_coop_init_" + this->container_);
1✔
386
                types::Scalar idx_type(types::PrimitiveType::UInt64);
1✔
387
                builder.add_container(idx_name, idx_type);
1✔
388
                auto idx_var = symbolic::symbol(idx_name);
1✔
389

390
                auto& init_loop = builder.add_map_before(
1✔
391
                    *parent,
1✔
392
                    loop_,
1✔
393
                    idx_var,
1✔
394
                    symbolic::Lt(idx_var, total_size),
1✔
395
                    coop_flat,
1✔
396
                    symbolic::add(idx_var, total_coop_threads),
1✔
397
                    structured_control_flow::ScheduleType_Sequential::create(),
1✔
398
                    {},
1✔
399
                    loop_.debug_info()
1✔
400
                );
1✔
401

402
                auto& init_block = builder.add_block(init_loop.root());
1✔
403
                auto& init_src = builder.add_access(init_block, this->container_);
1✔
404
                auto& init_dst = builder.add_access(init_block, local_name_);
1✔
405
                auto& init_tasklet = builder.add_tasklet(init_block, data_flow::TaskletCode::assign, "_out", {"_in"});
1✔
406

407
                // Decompose idx_var into per-dim indices
408
                std::vector<symbolic::Expression> init_indices;
1✔
409
                symbolic::Expression remainder = idx_var;
1✔
410
                for (size_t i = 0; i < dim_sizes.size(); i++) {
2✔
411
                    if (i < dim_sizes.size() - 1) {
1✔
412
                        symbolic::Expression divisor = symbolic::integer(1);
×
413
                        for (size_t j = i + 1; j < dim_sizes.size(); j++) {
×
414
                            divisor = symbolic::mul(divisor, dim_sizes[j]);
×
415
                        }
×
416
                        init_indices.push_back(symbolic::div(remainder, divisor));
×
417
                        remainder = symbolic::mod(remainder, divisor);
×
418
                    } else {
1✔
419
                        init_indices.push_back(remainder);
1✔
420
                    }
1✔
421
                }
1✔
422

423
                auto init_src_subset = build_original_subset(init_indices);
1✔
424
                builder.add_computational_memlet(init_block, init_src, init_tasklet, "_in", init_src_subset, type);
1✔
425
                builder.add_computational_memlet(init_block, init_tasklet, "_out", init_dst, {idx_var}, buffer_type);
1✔
426

427
                // Barrier after init
428
                auto& barrier_block2 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
1✔
429
                builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block2, {});
1✔
430
            }
1✔
431

432
            // WRITEBACK: barrier → cooperative copy-out → barrier
433
            {
4✔
434
                // Barrier before writeback
435
                auto& barrier_block3 = builder.add_block_after(*parent, loop_, {}, loop_.debug_info());
4✔
436
                builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block3, {});
4✔
437

438
                // Cooperative writeback loop
439
                auto idx_name = builder.find_new_name("__daisy_ols_coop_wb_" + this->container_);
4✔
440
                types::Scalar idx_type(types::PrimitiveType::UInt64);
4✔
441
                builder.add_container(idx_name, idx_type);
4✔
442
                auto idx_var = symbolic::symbol(idx_name);
4✔
443

444
                auto& wb_loop = builder.add_map_after(
4✔
445
                    *parent,
4✔
446
                    loop_,
4✔
447
                    idx_var,
4✔
448
                    symbolic::Lt(idx_var, total_size),
4✔
449
                    coop_flat,
4✔
450
                    symbolic::add(idx_var, total_coop_threads),
4✔
451
                    structured_control_flow::ScheduleType_Sequential::create(),
4✔
452
                    {},
4✔
453
                    loop_.debug_info()
4✔
454
                );
4✔
455

456
                auto& wb_block = builder.add_block(wb_loop.root());
4✔
457
                auto& wb_src = builder.add_access(wb_block, local_name_);
4✔
458
                auto& wb_dst = builder.add_access(wb_block, this->container_);
4✔
459
                auto& wb_tasklet = builder.add_tasklet(wb_block, data_flow::TaskletCode::assign, "_out", {"_in"});
4✔
460

461
                // Decompose idx_var into per-dim indices
462
                std::vector<symbolic::Expression> wb_indices;
4✔
463
                symbolic::Expression remainder = idx_var;
4✔
464
                for (size_t i = 0; i < dim_sizes.size(); i++) {
8✔
465
                    if (i < dim_sizes.size() - 1) {
4✔
466
                        symbolic::Expression divisor = symbolic::integer(1);
×
467
                        for (size_t j = i + 1; j < dim_sizes.size(); j++) {
×
468
                            divisor = symbolic::mul(divisor, dim_sizes[j]);
×
469
                        }
×
470
                        wb_indices.push_back(symbolic::div(remainder, divisor));
×
471
                        remainder = symbolic::mod(remainder, divisor);
×
472
                    } else {
4✔
473
                        wb_indices.push_back(remainder);
4✔
474
                    }
4✔
475
                }
4✔
476

477
                auto wb_dst_subset = build_original_subset(wb_indices);
4✔
478
                builder.add_computational_memlet(wb_block, wb_src, wb_tasklet, "_in", {idx_var}, buffer_type);
4✔
479
                builder.add_computational_memlet(wb_block, wb_tasklet, "_out", wb_dst, wb_dst_subset, type);
4✔
480

481
                // Barrier after writeback
482
                auto& barrier_block4 = builder.add_block_after(*parent, loop_, {}, loop_.debug_info());
4✔
483
                builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block4, {});
4✔
484
            }
4✔
485
        } else {
13✔
486
            // ============================================================
487
            // CPU SEQUENTIAL PATH
488
            // ============================================================
489
            if (tile_info_.has_read) {
13✔
490
                std::vector<symbolic::Symbol> init_indvars;
10✔
491
                structured_control_flow::Sequence* init_scope = parent;
10✔
492
                bool first_init_loop = true;
10✔
493

494
                for (size_t i = 0; i < varying_dims.size(); i++) {
19✔
495
                    size_t d = varying_dims[i];
9✔
496
                    auto indvar_name =
9✔
497
                        builder.find_new_name("__daisy_ols_init_" + this->container_ + "_d" + std::to_string(d));
9✔
498
                    types::Scalar indvar_type(types::PrimitiveType::UInt64);
9✔
499
                    builder.add_container(indvar_name, indvar_type);
9✔
500
                    auto indvar = symbolic::symbol(indvar_name);
9✔
501
                    init_indvars.push_back(indvar);
9✔
502

503
                    auto init = symbolic::integer(0);
9✔
504
                    auto condition = symbolic::Lt(indvar, dim_sizes[i]);
9✔
505
                    auto update = symbolic::add(indvar, symbolic::integer(1));
9✔
506

507
                    if (first_init_loop) {
9✔
508
                        auto& init_loop = builder.add_map_before(
6✔
509
                            *init_scope,
6✔
510
                            loop_,
6✔
511
                            indvar,
6✔
512
                            condition,
6✔
513
                            init,
6✔
514
                            update,
6✔
515
                            structured_control_flow::ScheduleType_Sequential::create(),
6✔
516
                            {},
6✔
517
                            loop_.debug_info()
6✔
518
                        );
6✔
519
                        init_scope = &init_loop.root();
6✔
520
                        first_init_loop = false;
6✔
521
                    } else {
6✔
522
                        auto& init_loop = builder.add_map(
3✔
523
                            *init_scope,
3✔
524
                            indvar,
3✔
525
                            condition,
3✔
526
                            init,
3✔
527
                            update,
3✔
528
                            structured_control_flow::ScheduleType_Sequential::create(),
3✔
529
                            {},
3✔
530
                            loop_.debug_info()
3✔
531
                        );
3✔
532
                        init_scope = &init_loop.root();
3✔
533
                    }
3✔
534
                }
9✔
535

536
                // Create init copy block
537
                auto& init_block = builder.add_block(*init_scope);
10✔
538
                auto& init_src = builder.add_access(init_block, this->container_);
10✔
539
                auto& init_dst = builder.add_access(init_block, local_name_);
10✔
540
                auto& init_tasklet = builder.add_tasklet(init_block, data_flow::TaskletCode::assign, "_out", {"_in"});
10✔
541

542
                std::vector<symbolic::Expression> init_exprs(init_indvars.begin(), init_indvars.end());
10✔
543
                auto init_src_subset = build_original_subset(init_exprs);
10✔
544
                data_flow::Subset init_dst_subset = {linearize(init_indvars)};
10✔
545

546
                builder.add_computational_memlet(init_block, init_src, init_tasklet, "_in", init_src_subset, type);
10✔
547
                builder
10✔
548
                    .add_computational_memlet(init_block, init_tasklet, "_out", init_dst, init_dst_subset, buffer_type);
10✔
549
            }
10✔
550

551
            // Writeback Maps
552
            {
13✔
553
                std::vector<symbolic::Symbol> wb_indvars;
13✔
554
                structured_control_flow::Sequence* wb_scope = parent;
13✔
555
                bool first_wb_loop = true;
13✔
556

557
                for (size_t i = 0; i < varying_dims.size(); i++) {
25✔
558
                    size_t d = varying_dims[i];
12✔
559
                    auto indvar_name =
12✔
560
                        builder.find_new_name("__daisy_ols_wb_" + this->container_ + "_d" + std::to_string(d));
12✔
561
                    types::Scalar indvar_type(types::PrimitiveType::UInt64);
12✔
562
                    builder.add_container(indvar_name, indvar_type);
12✔
563
                    auto indvar = symbolic::symbol(indvar_name);
12✔
564
                    wb_indvars.push_back(indvar);
12✔
565

566
                    auto init = symbolic::integer(0);
12✔
567
                    auto condition = symbolic::Lt(indvar, dim_sizes[i]);
12✔
568
                    auto update = symbolic::add(indvar, symbolic::integer(1));
12✔
569

570
                    if (first_wb_loop) {
12✔
571
                        auto& wb_loop = builder.add_map_after(
9✔
572
                            *wb_scope,
9✔
573
                            loop_,
9✔
574
                            indvar,
9✔
575
                            condition,
9✔
576
                            init,
9✔
577
                            update,
9✔
578
                            structured_control_flow::ScheduleType_Sequential::create(),
9✔
579
                            {},
9✔
580
                            loop_.debug_info()
9✔
581
                        );
9✔
582
                        wb_scope = &wb_loop.root();
9✔
583
                        first_wb_loop = false;
9✔
584
                    } else {
9✔
585
                        auto& wb_loop = builder.add_map(
3✔
586
                            *wb_scope,
3✔
587
                            indvar,
3✔
588
                            condition,
3✔
589
                            init,
3✔
590
                            update,
3✔
591
                            structured_control_flow::ScheduleType_Sequential::create(),
3✔
592
                            {},
3✔
593
                            loop_.debug_info()
3✔
594
                        );
3✔
595
                        wb_scope = &wb_loop.root();
3✔
596
                    }
3✔
597
                }
12✔
598

599
                // Create writeback copy block
600
                auto& wb_block = builder.add_block(*wb_scope);
13✔
601
                auto& wb_src = builder.add_access(wb_block, local_name_);
13✔
602
                auto& wb_dst = builder.add_access(wb_block, this->container_);
13✔
603
                auto& wb_tasklet = builder.add_tasklet(wb_block, data_flow::TaskletCode::assign, "_out", {"_in"});
13✔
604

605
                std::vector<symbolic::Expression> wb_exprs(wb_indvars.begin(), wb_indvars.end());
13✔
606
                data_flow::Subset wb_src_subset = {linearize(wb_indvars)};
13✔
607
                auto wb_dst_subset = build_original_subset(wb_exprs);
13✔
608

609
                builder.add_computational_memlet(wb_block, wb_src, wb_tasklet, "_in", wb_src_subset, buffer_type);
13✔
610
                builder.add_computational_memlet(wb_block, wb_tasklet, "_out", wb_dst, wb_dst_subset, type);
13✔
611
            }
13✔
612
        }
13✔
613

614
        // ==================================================================
615
        // Update accesses in the main loop to use the local buffer
616
        // ==================================================================
617
        auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();
17✔
618

619
        // Recursive helper to traverse all blocks in the loop body
620
        std::function<void(structured_control_flow::ControlFlowNode&)> rewrite_accesses;
17✔
621
        rewrite_accesses = [&](structured_control_flow::ControlFlowNode& node) {
51✔
622
            if (auto* block = dynamic_cast<structured_control_flow::Block*>(&node)) {
51✔
623
                auto& dfg = block->dataflow();
20✔
624
                for (auto* access : dfg.data_nodes()) {
54✔
625
                    if (access->data() != this->container_) continue;
54✔
626
                    bool all_rewritten = true;
30✔
627
                    // Rewrite outgoing memlets (reads from this access node)
628
                    for (auto& memlet : dfg.out_edges(*access)) {
30✔
629
                        if (group_memlets_.count(&memlet) == 0) {
12✔
NEW
630
                            all_rewritten = false;
×
NEW
631
                            continue;
×
NEW
632
                        }
×
633
                        auto* acc = mla.access(memlet);
12✔
634
                        if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
12✔
635
                            std::vector<symbolic::Expression> local_indices;
12✔
636
                            for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
32✔
637
                                if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
20✔
638
                                    local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
10✔
639
                                }
10✔
640
                            }
20✔
641
                            symbolic::Expression linear_idx = linearize_exprs(local_indices);
12✔
642
                            memlet.set_subset({linear_idx});
12✔
643
                            memlet.set_base_type(buffer_type);
12✔
644
                        }
12✔
645
                    }
12✔
646
                    // Rewrite incoming memlets (writes to this access node)
647
                    for (auto& memlet : dfg.in_edges(*access)) {
30✔
648
                        if (group_memlets_.count(&memlet) == 0) {
18✔
NEW
649
                            all_rewritten = false;
×
NEW
650
                            continue;
×
NEW
651
                        }
×
652
                        auto* acc = mla.access(memlet);
18✔
653
                        if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
18✔
654
                            std::vector<symbolic::Expression> local_indices;
18✔
655
                            for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
47✔
656
                                if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
29✔
657
                                    local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
16✔
658
                                }
16✔
659
                            }
29✔
660
                            symbolic::Expression linear_idx = linearize_exprs(local_indices);
18✔
661
                            memlet.set_subset({linear_idx});
18✔
662
                            memlet.set_base_type(buffer_type);
18✔
663
                        }
18✔
664
                    }
18✔
665
                    // Rename the access node only if all its memlets belong to our group
666
                    if (all_rewritten) {
30✔
667
                        access->data(local_name_);
30✔
668
                    }
30✔
669
                }
30✔
670
            } else if (auto* seq = dynamic_cast<structured_control_flow::Sequence*>(&node)) {
31✔
671
                for (size_t i = 0; i < seq->size(); i++) {
51✔
672
                    rewrite_accesses(seq->at(i).first);
27✔
673
                }
27✔
674
            } else if (auto* loop = dynamic_cast<structured_control_flow::StructuredLoop*>(&node)) {
24✔
675
                rewrite_accesses(loop->root());
7✔
676
            } else if (auto* if_else = dynamic_cast<structured_control_flow::IfElse*>(&node)) {
7✔
677
                for (size_t i = 0; i < if_else->size(); i++) {
×
678
                    rewrite_accesses(if_else->at(i).first);
×
679
                }
×
680
            }
×
681
        };
51✔
682
        rewrite_accesses(loop_.root());
17✔
683
    }
17✔
684

685
    // Cleanup
686
    analysis_manager.invalidate_all();
18✔
687

688
    passes::SequenceFusion sf_pass;
18✔
689
    passes::DeadCFGElimination dce_pass;
18✔
690
    bool applies = false;
18✔
691
    do {
18✔
692
        applies = false;
18✔
693
        applies |= dce_pass.run(builder, analysis_manager);
18✔
694
        applies |= sf_pass.run(builder, analysis_manager);
18✔
695
    } while (applies);
18✔
696
};
18✔
697

698
void OutLocalStorage::to_json(nlohmann::json& j) const {
    // Serialize this transformation as a subgraph description: entry "0" is the
    // target loop (tagged with its concrete kind), entry "1" is the access node.
    std::string loop_type;
    if (dynamic_cast<structured_control_flow::For*>(&loop_) != nullptr) {
        loop_type = "for";
    } else if (dynamic_cast<structured_control_flow::Map*>(&loop_) != nullptr) {
        loop_type = "map";
    } else {
        // Only For and Map loops have a serializable tag.
        throw std::runtime_error("Unsupported loop type for serialization of loop: " + loop_.indvar()->get_name());
    }

    nlohmann::json subgraph;
    subgraph["0"] = {{"element_id", this->loop_.element_id()}, {"type", loop_type}};
    subgraph["1"] = {{"element_id", this->access_node_.element_id()}, {"type", "access_node"}};
    j["subgraph"] = subgraph;
    j["transformation_type"] = this->name();
};
713

714
OutLocalStorage OutLocalStorage::from_json(builder::StructuredSDFGBuilder& builder, const nlohmann::json& desc) {
    // Deserialize a transformation description produced by to_json():
    // subgraph entry "0" must resolve to a structured loop, entry "1" to an access node.
    // @throws InvalidTransformationDescriptionException if either element is missing
    //         or has the wrong dynamic type.
    auto loop_id = desc["subgraph"]["0"]["element_id"].get<size_t>();
    auto element = builder.find_element_by_id(loop_id);
    if (!element) {
        throw InvalidTransformationDescriptionException("Element with ID " + std::to_string(loop_id) + " not found.");
    }
    auto loop = dynamic_cast<structured_control_flow::StructuredLoop*>(element);
    if (!loop) {
        // Bug fix: the cast result was previously dereferenced unchecked; a non-loop
        // element id would have caused a null-pointer dereference below.
        throw InvalidTransformationDescriptionException(
            "Element with ID " + std::to_string(loop_id) + " is not a structured loop."
        );
    }

    // Hoist the id so it is extracted from the JSON once, not re-parsed in the error path.
    auto access_node_id = desc.at("subgraph").at("1").at("element_id").get<size_t>();
    auto access_node = dynamic_cast<data_flow::AccessNode*>(builder.find_element_by_id(access_node_id));
    if (!access_node) {
        throw InvalidTransformationDescriptionException(
            "Access node with ID " + std::to_string(access_node_id) + " not found."
        );
    }

    return OutLocalStorage(*loop, *access_node);
};
733

734
} // namespace transformations
735
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc