• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 25321382601

04 May 2026 01:20PM UTC coverage: 64.84% (-0.001%) from 64.841%
25321382601

Pull #701

github

web-flow
Merge e3ea6eb8c into af5fe61ef
Pull Request #701: smart memlet selection in packing transformations

53 of 76 new or added lines in 2 files covered. (69.74%)

2 existing lines in 1 file now uncovered.

31611 of 48752 relevant lines covered (64.84%)

2338.21 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.21
/opt/src/transformations/in_local_storage.cpp
1
#include "sdfg/transformations/in_local_storage.h"
2

3
#include <cstddef>
4
#include <functional>
5
#include <string>
6

7
#include "sdfg/analysis/memory_layout_analysis.h"
8
#include "sdfg/analysis/scope_analysis.h"
9
#include "sdfg/analysis/users.h"
10
#include "sdfg/builder/structured_sdfg_builder.h"
11
#include "sdfg/data_flow/access_node.h"
12
#include "sdfg/data_flow/library_nodes/barrier_local_node.h"
13
#include "sdfg/data_flow/memlet.h"
14
#include "sdfg/passes/structured_control_flow/dead_cfg_elimination.h"
15
#include "sdfg/passes/structured_control_flow/sequence_fusion.h"
16
#include "sdfg/structured_control_flow/if_else.h"
17
#include "sdfg/structured_control_flow/sequence.h"
18
#include "sdfg/structured_control_flow/structured_loop.h"
19
#include "sdfg/symbolic/symbolic.h"
20
#include "sdfg/targets/gpu/gpu_schedule_type.h"
21
#include "sdfg/types/array.h"
22
#include "sdfg/types/pointer.h"
23
#include "sdfg/types/scalar.h"
24

25
namespace sdfg {
26
namespace transformations {
27

28
/// Constructs an InLocalStorage transformation.
///
/// @param loop         Structured loop whose body accesses the container.
/// @param access_node  Access node whose underlying container is promoted to a
///                     local buffer; its data() name is cached as container_.
/// @param storage_type Storage type of the local buffer to be created
///                     (e.g. default/CPU storage or NV shared memory).
InLocalStorage::InLocalStorage(
    structured_control_flow::StructuredLoop& loop,
    const data_flow::AccessNode& access_node,
    const types::StorageType& storage_type
)
    : loop_(loop),
      access_node_(access_node),
      container_(access_node.data()),
      storage_type_(storage_type) {}
34

35
/// Returns the unique identifier of this transformation type,
/// used e.g. as "transformation_type" in JSON serialization.
std::string InLocalStorage::name() const {
    return "InLocalStorage";
}
36

37
/// Checks whether the container accessed by access_node_ can be promoted to a
/// local buffer around loop_.
///
/// Side effects on success: fills tile_info_ (extents, bases, strides, offset
/// of the selected tile group, with GPU block-size substitution already applied
/// on the NV-shared path) and group_memlets_ (the memlets belonging to the
/// selected tile group), both of which apply() consumes.
///
/// @param builder          Builder owning the SDFG under transformation.
/// @param analysis_manager Provides Users, MemoryLayoutAnalysis and ScopeAnalysis.
/// @return true iff all criteria below hold.
bool InLocalStorage::can_be_applied(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& sdfg = builder.subject();
    auto& body = this->loop_.root();

    // Reset any state left over from a previous can_be_applied() call.
    tile_info_ = TileInfo{};

    // Criterion: Container must exist
    if (!sdfg.exists(this->container_)) {
        return false;
    }

    auto& type = sdfg.type(this->container_);

    // Criterion: Container must be Array or Pointer (not Scalar)
    if (type.type_id() != types::TypeID::Pointer && type.type_id() != types::TypeID::Array) {
        return false;
    }

    // Criterion: Container must be used in the loop body
    auto& users = analysis_manager.get<analysis::Users>();
    analysis::UsersView body_users(users, body);
    if (body_users.uses(this->container_).empty()) {
        return false;
    }

    // Criterion: Container must be read-only within the loop (no writes)
    if (!body_users.writes(this->container_).empty()) {
        return false;
    }

    // Use MemoryLayoutAnalysis tile group API
    auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();

    // Find a representative memlet from the access node to identify its group.
    // An access node may have multiple out-edges belonging to different tile
    // groups (e.g., A[i,k] and A[j,k]).  We iterate all out-edges and select
    // the first one whose tile group has provably integer extents (i.e., can
    // actually be applied).  This avoids picking a group with symbolic extents
    // when another group on the same node would succeed.
    // For GPU shared memory, extents may be symbolic until GPU block size
    // substitution, so we accept the first valid group unconditionally.
    const analysis::MemoryTileGroup* group = nullptr;
    auto& dfg = access_node_.get_parent();
    for (auto& memlet : dfg.out_edges(access_node_)) {
        auto* candidate = mla.tile_group_for(loop_, memlet);
        if (!candidate) continue;

        auto extents = candidate->tile.extents_approx();
        if (extents.empty()) continue;

        if (storage_type_.is_nv_shared()) {
            // GPU path: accept first valid group (substitution happens later)
            group = candidate;
            break;
        }

        // CPU path: require provably integer extents
        bool all_integer = true;
        for (auto& ext : extents) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                all_integer = false;
                break;
            }
        }
        if (all_integer) {
            group = candidate;
            break;
        }
    }
    // No out-edge of the access node yielded a usable tile group.
    if (!group) {
        return false;
    }

    auto& tile = group->tile;

    // Store group memlets for use in apply()
    group_memlets_.clear();
    group_memlets_.insert(group->memlets.begin(), group->memlets.end());

    // Get overapproximated extents (integer upper bounds)
    auto extents = tile.extents_approx();
    if (extents.empty()) {
        return false;
    }

    // Store tile info (before substitution, bases/strides stay symbolic)
    tile_info_.dimensions = extents;
    tile_info_.bases = tile.min_subset;
    tile_info_.strides = std::vector<symbolic::Expression>(tile.layout.strides().begin(), tile.layout.strides().end());
    tile_info_.offset = tile.layout.offset();

    // GPU shared memory: resolve symbolic extents using GPU block sizes and
    // require at least one cooperative dimension
    if (storage_type_.is_nv_shared()) {
        auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
        auto ancestors = scope_analysis.ancestor_scopes(&loop_);

        // Build substitution map: symbolic GPU map bounds → integer block sizes
        // E.g., Map condition "i < N" with block_size=32 → N=32
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                auto block_size = gpu::gpu_block_size(ancestor_map->schedule_type());
                // Extract symbolic bound from condition: Lt(indvar, BOUND)
                auto condition = ancestor_map->condition();
                if (SymEngine::is_a<SymEngine::StrictLessThan>(*condition)) {
                    auto stl = SymEngine::rcp_static_cast<const SymEngine::StrictLessThan>(condition);
                    auto rhs = stl->get_args()[1];
                    // iter_count = bound - init; only substitute when it is not
                    // already a concrete integer (i.e. the bound is symbolic).
                    auto iter_count = symbolic::sub(rhs, ancestor_map->init());
                    if (!SymEngine::is_a<SymEngine::Integer>(*iter_count)) {
                        // Symbolic bound — substitute with block size in extents and bases
                        for (auto& ext : tile_info_.dimensions) {
                            ext = symbolic::simplify(symbolic::subs(ext, iter_count, block_size));
                        }
                        for (auto& base : tile_info_.bases) {
                            base = symbolic::simplify(symbolic::subs(base, iter_count, block_size));
                        }
                    }
                }
            }
        }

        // Also resolve the loop's own bound if symbolic and matches a block size
        // E.g., For k = 0..K where K is a parameter — check if K can be resolved
        // from any GPU ancestor map
        // (Already handled above: if K appears as a GPU map bound, it's substituted)

        // Criterion: All extents must now be provably integer
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }

        // Criterion: At least one cooperative dimension
        bool has_cooperative_dim = false;
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                // A GPU dim is cooperative if its indvar does NOT appear in any tile base
                bool appears_in_bases = false;
                for (auto& base : tile_info_.bases) {
                    if (symbolic::uses(base, ancestor_map->indvar())) {
                        appears_in_bases = true;
                        break;
                    }
                }
                if (!appears_in_bases) {
                    has_cooperative_dim = true;
                    break;
                }
            }
        }
        if (!has_cooperative_dim) {
            return false;
        }
    } else {
        // CPU path: All extents must be provably integer.
        // NOTE(review): this re-checks what the group-selection loop above
        // already guaranteed for the chosen group — kept as a safety net.
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }
    }

    return true;
}
208

209
/// Applies the transformation: materializes a local buffer for the container,
/// emits a copy-in before loop_, and redirects the tile group's memlets inside
/// the loop body to the new buffer.
///
/// Preconditions: can_be_applied() returned true, so tile_info_ and
/// group_memlets_ are populated.
///
/// @param builder          Builder owning the SDFG under transformation.
/// @param analysis_manager Provides ScopeAnalysis / MemoryLayoutAnalysis;
///                         invalidated at the end because the CFG is rewritten.
/// @throws InvalidSDFGException if the loop's parent scope is not a Sequence.
void InLocalStorage::apply(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& sdfg = builder.subject();
    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();

    // The copy-in loop/barriers are inserted as siblings before loop_, so the
    // parent must be a Sequence.
    auto parent_node = scope_analysis.parent_scope(&loop_);
    auto parent = dynamic_cast<structured_control_flow::Sequence*>(parent_node);
    if (!parent) {
        throw InvalidSDFGException("InLocalStorage: Parent of loop must be a Sequence!");
    }

    // Get type information
    auto& type = sdfg.type(this->container_);
    types::Scalar scalar_type(type.primitive_type());

    // Create local buffer name
    local_name_ = builder.find_new_name("__daisy_in_local_storage_" + this->container_);

    // Collect varying dimensions (extent > 1) and compute buffer layout.
    // Dimensions with extent 1 are collapsed out of the local buffer.
    std::vector<size_t> varying_dims;
    std::vector<symbolic::Expression> dim_sizes;
    for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
        auto& dim_size = tile_info_.dimensions.at(d);
        if (!symbolic::eq(dim_size, symbolic::integer(1))) {
            varying_dims.push_back(d);
            dim_sizes.push_back(dim_size);
        }
    }

    // Compute total buffer size (product of varying extents)
    symbolic::Expression total_size = symbolic::integer(1);
    for (auto& ds : dim_sizes) {
        total_size = symbolic::mul(total_size, ds);
    }

    // Helper: build linearized local index from per-dimension symbolic
    // expressions (row-major: last varying dimension has stride 1)
    auto linearize_exprs = [&](const std::vector<symbolic::Expression>& indices) -> symbolic::Expression {
        symbolic::Expression linear_idx = symbolic::integer(0);
        symbolic::Expression stride = symbolic::integer(1);
        for (int i = indices.size() - 1; i >= 0; i--) {
            linear_idx = symbolic::add(linear_idx, symbolic::mul(indices[i], stride));
            stride = symbolic::mul(stride, dim_sizes[i]);
        }
        return linear_idx;
    };

    // Helper: build linearized local index from per-dimension indvars (symbols)
    auto linearize = [&](const std::vector<symbolic::Symbol>& indvars) -> symbolic::Expression {
        std::vector<symbolic::Expression> exprs(indvars.begin(), indvars.end());
        return linearize_exprs(exprs);
    };

    // Helper: build source subset (base[d] + copy_indvar[d]) for original container.
    // Pointer containers get a single linearized index (strides/offset applied);
    // array containers keep one index per dimension.
    bool is_pointer = (type.type_id() == types::TypeID::Pointer);
    auto build_original_subset = [&](const std::vector<symbolic::Expression>& copy_indices) -> data_flow::Subset {
        std::vector<symbolic::Expression> full_indices;
        size_t var_idx = 0;
        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                full_indices.push_back(symbolic::add(tile_info_.bases.at(d), copy_indices.at(var_idx++)));
            } else {
                // Collapsed dimension: index is just the tile base.
                full_indices.push_back(tile_info_.bases.at(d));
            }
        }

        if (is_pointer) {
            symbolic::Expression linear = tile_info_.offset;
            for (size_t d = 0; d < full_indices.size(); d++) {
                linear = symbolic::add(linear, symbolic::mul(tile_info_.strides.at(d), full_indices.at(d)));
            }
            return {linear};
        } else {
            return data_flow::Subset(full_indices.begin(), full_indices.end());
        }
    };

    // ==================================================================
    // Branch: GPU cooperative path vs CPU sequential path
    // ==================================================================
    if (storage_type_.is_nv_shared()) {
        // ============================================================
        // GPU COOPERATIVE PATH
        // ============================================================
        auto ancestors = scope_analysis.ancestor_scopes(&loop_);

        // Collect cooperative GPU dimensions (indvar not in tile bases) —
        // same criterion as in can_be_applied().
        struct CoopDim {
            symbolic::Symbol indvar;
            symbolic::Integer block_size;
            gpu::GPUDimension dimension;
        };
        std::vector<CoopDim> coop_dims;

        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                bool appears_in_bases = false;
                for (auto& base : tile_info_.bases) {
                    if (symbolic::uses(base, ancestor_map->indvar())) {
                        appears_in_bases = true;
                        break;
                    }
                }
                if (!appears_in_bases) {
                    coop_dims.push_back(
                        {ancestor_map->indvar(),
                         gpu::gpu_block_size(ancestor_map->schedule_type()),
                         gpu::gpu_dimension(ancestor_map->schedule_type())}
                    );
                }
            }
        }

        // Compute total cooperative thread count
        symbolic::Expression total_coop_threads = symbolic::integer(1);
        for (auto& cd : coop_dims) {
            total_coop_threads = symbolic::mul(total_coop_threads, cd.block_size);
        }

        // Create the local buffer with NV_Shared storage
        types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_container(local_name_, buffer_type);

        // Emit: barrier → strided cooperative copy loop → barrier → loop
        // 1. Barrier before copy
        auto& barrier_block1 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
        builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block1, {});

        // 2. Cooperative copy (bounds-checked via the copy loop's condition)
        // Flatten cooperative thread index: coop_flat = sum(indvar[i] * product(block_size[j] for j>i))
        symbolic::Expression coop_flat = symbolic::integer(0);
        symbolic::Expression coop_stride = symbolic::integer(1);
        for (int i = coop_dims.size() - 1; i >= 0; i--) {
            coop_flat = symbolic::add(coop_flat, symbolic::mul(coop_dims[i].indvar, coop_stride));
            coop_stride = symbolic::mul(coop_stride, coop_dims[i].block_size);
        }

        // Each thread loads elements strided by total_coop_threads
        // Thread t loads elements: t, t + total_threads, t + 2*total_threads, ...
        // We emit a loop: for (idx = coop_flat; idx < total_size; idx += total_coop_threads)
        auto idx_name = builder.find_new_name("__daisy_ils_coop_" + this->container_);
        types::Scalar idx_type(types::PrimitiveType::UInt64);
        builder.add_container(idx_name, idx_type);
        auto idx_var = symbolic::symbol(idx_name);

        auto copy_init = coop_flat;
        auto copy_condition = symbolic::Lt(idx_var, total_size);
        auto copy_update = symbolic::add(idx_var, total_coop_threads);

        auto& copy_loop = builder.add_map_before(
            *parent,
            loop_,
            idx_var,
            copy_condition,
            copy_init,
            copy_update,
            structured_control_flow::ScheduleType_Sequential::create(),
            {},
            loop_.debug_info()
        );

        // Decompose flat idx back into per-dimension indices for source subset
        // idx maps to varying_dims in row-major order
        auto& copy_scope = copy_loop.root();
        auto& copy_block = builder.add_block(copy_scope);
        auto& copy_src = builder.add_access(copy_block, this->container_);
        auto& copy_dst = builder.add_access(copy_block, local_name_);
        auto& copy_tasklet = builder.add_tasklet(copy_block, data_flow::TaskletCode::assign, "_out", {"_in"});

        // Decompose idx_var into per-dim indices (div/mod by the product of
        // trailing dimension sizes; last dimension takes the remainder)
        std::vector<symbolic::Expression> copy_indices;
        symbolic::Expression remainder = idx_var;
        for (size_t i = 0; i < dim_sizes.size(); i++) {
            if (i < dim_sizes.size() - 1) {
                // integer division: idx / (product of remaining dims)
                symbolic::Expression divisor = symbolic::integer(1);
                for (size_t j = i + 1; j < dim_sizes.size(); j++) {
                    divisor = symbolic::mul(divisor, dim_sizes[j]);
                }
                auto quotient = symbolic::div(remainder, divisor);
                copy_indices.push_back(quotient);
                remainder = symbolic::mod(remainder, divisor);
            } else {
                copy_indices.push_back(remainder);
            }
        }

        auto copy_src_subset = build_original_subset(copy_indices);
        data_flow::Subset copy_dst_subset = {idx_var};

        builder.add_computational_memlet(copy_block, copy_src, copy_tasklet, "_in", copy_src_subset, type);
        builder.add_computational_memlet(copy_block, copy_tasklet, "_out", copy_dst, copy_dst_subset, buffer_type);

        // 3. Barrier after copy
        auto& barrier_block2 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
        builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block2, {});
    } else {
        // ============================================================
        // CPU SEQUENTIAL PATH
        // ============================================================
        // Create the local buffer with specified storage type
        types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_container(local_name_, buffer_type);

        // Emit one sequential copy map per varying dimension, nested: the
        // first (outermost) is inserted before loop_, the rest inside it.
        std::vector<symbolic::Symbol> copy_indvars;
        structured_control_flow::Sequence* copy_scope = parent;
        bool first_copy_loop = true;

        for (size_t i = 0; i < varying_dims.size(); i++) {
            size_t d = varying_dims[i];
            auto indvar_name = builder.find_new_name("__daisy_ils_" + this->container_ + "_d" + std::to_string(d));
            types::Scalar indvar_type(types::PrimitiveType::UInt64);
            builder.add_container(indvar_name, indvar_type);
            auto indvar = symbolic::symbol(indvar_name);
            copy_indvars.push_back(indvar);

            auto init = symbolic::integer(0);
            auto condition = symbolic::Lt(indvar, dim_sizes[i]);
            auto update = symbolic::add(indvar, symbolic::integer(1));

            if (first_copy_loop) {
                auto& copy_loop = builder.add_map_before(
                    *copy_scope,
                    loop_,
                    indvar,
                    condition,
                    init,
                    update,
                    structured_control_flow::ScheduleType_Sequential::create(),
                    {},
                    loop_.debug_info()
                );
                copy_scope = &copy_loop.root();
                first_copy_loop = false;
            } else {
                auto& copy_loop = builder.add_map(
                    *copy_scope,
                    indvar,
                    condition,
                    init,
                    update,
                    structured_control_flow::ScheduleType_Sequential::create(),
                    {},
                    loop_.debug_info()
                );
                copy_scope = &copy_loop.root();
            }
        }

        // Create copy block (innermost scope): local[linearize(ivs)] = src[base+ivs]
        auto& copy_block = builder.add_block(*copy_scope);
        auto& copy_src = builder.add_access(copy_block, this->container_);
        auto& copy_dst = builder.add_access(copy_block, local_name_);
        auto& copy_tasklet = builder.add_tasklet(copy_block, data_flow::TaskletCode::assign, "_out", {"_in"});

        std::vector<symbolic::Expression> copy_exprs(copy_indvars.begin(), copy_indvars.end());
        auto copy_src_subset = build_original_subset(copy_exprs);
        data_flow::Subset copy_dst_subset = {linearize(copy_indvars)};

        builder.add_computational_memlet(copy_block, copy_src, copy_tasklet, "_in", copy_src_subset, type);
        types::Array buffer_type_ref(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_computational_memlet(copy_block, copy_tasklet, "_out", copy_dst, copy_dst_subset, buffer_type_ref);
    }

    // ==================================================================
    // Update accesses in the main loop to use the local buffer
    // ==================================================================
    types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
    auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();

    // Recursive helper to traverse all blocks in the loop body
    std::function<void(structured_control_flow::ControlFlowNode&)> rewrite_accesses;
    rewrite_accesses = [&](structured_control_flow::ControlFlowNode& node) {
        if (auto* block = dynamic_cast<structured_control_flow::Block*>(&node)) {
            auto& dfg = block->dataflow();

            // Collect access nodes to process (avoid iterator invalidation)
            std::vector<data_flow::AccessNode*> access_nodes;
            for (auto* access_node : dfg.data_nodes()) {
                if (access_node->data() == this->container_) {
                    access_nodes.push_back(access_node);
                }
            }

            for (auto* access : access_nodes) {
                // Classify memlets: group vs non-group
                struct MemletRewrite {
                    data_flow::Memlet* memlet;
                    data_flow::Subset local_subset;
                    bool is_outgoing;
                };
                std::vector<MemletRewrite> group_rewrites;
                bool all_in_group = true;

                // Outgoing memlets (reads through this access node)
                for (auto& memlet : dfg.out_edges(*access)) {
                    if (group_memlets_.count(&memlet) == 0) {
                        all_in_group = false;
                        continue;
                    }
                    auto* acc = mla.access(memlet);
                    if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
                        // Local index per varying dim = original index - tile base
                        std::vector<symbolic::Expression> local_indices;
                        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
                            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                                local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
                            }
                        }
                        symbolic::Expression linear_idx = linearize_exprs(local_indices);
                        group_rewrites.push_back({&memlet, {linear_idx}, true});
                    }
                }
                // Incoming memlets (writes; can_be_applied rejects writes, but
                // handled symmetrically for safety)
                for (auto& memlet : dfg.in_edges(*access)) {
                    if (group_memlets_.count(&memlet) == 0) {
                        all_in_group = false;
                        continue;
                    }
                    auto* acc = mla.access(memlet);
                    if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
                        std::vector<symbolic::Expression> local_indices;
                        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
                            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                                local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
                            }
                        }
                        symbolic::Expression linear_idx = linearize_exprs(local_indices);
                        group_rewrites.push_back({&memlet, {linear_idx}, false});
                    }
                }

                if (group_rewrites.empty()) continue;

                if (all_in_group) {
                    // Simple case: all memlets in group → rewrite in-place and rename
                    for (auto& rw : group_rewrites) {
                        rw.memlet->set_subset(rw.local_subset);
                        rw.memlet->set_base_type(buffer_type);
                    }
                    access->data(local_name_);
                } else {
                    // Mixed case: split — create new local access node, redirect group memlets
                    auto& local_access = builder.add_access(*block, local_name_);
                    for (auto& rw : group_rewrites) {
                        if (rw.is_outgoing) {
                            // outgoing: access→tasklet  →  local_access→tasklet
                            auto& dst_node = rw.memlet->dst();
                            auto dst_conn = rw.memlet->dst_conn();
                            builder.remove_memlet(*block, *rw.memlet);
                            builder.add_memlet(
                                *block, local_access, "void", dst_node, dst_conn, rw.local_subset, buffer_type, {}
                            );
                        } else {
                            // incoming: tasklet→access  →  tasklet→local_access
                            auto& src_node = rw.memlet->src();
                            auto src_conn = rw.memlet->src_conn();
                            builder.remove_memlet(*block, *rw.memlet);
                            builder.add_memlet(
                                *block, src_node, src_conn, local_access, "void", rw.local_subset, buffer_type, {}
                            );
                        }
                    }
                }
            }
        } else if (auto* seq = dynamic_cast<structured_control_flow::Sequence*>(&node)) {
            for (size_t i = 0; i < seq->size(); i++) {
                rewrite_accesses(seq->at(i).first);
            }
        } else if (auto* loop = dynamic_cast<structured_control_flow::StructuredLoop*>(&node)) {
            rewrite_accesses(loop->root());
        } else if (auto* if_else = dynamic_cast<structured_control_flow::IfElse*>(&node)) {
            for (size_t i = 0; i < if_else->size(); i++) {
                rewrite_accesses(if_else->at(i).first);
            }
        }
    };
    rewrite_accesses(loop_.root());

    // Cleanup: the CFG changed, so all cached analyses are stale.
    analysis_manager.invalidate_all();

    // Normalize the result: remove dead CFG nodes and fuse adjacent sequences
    // until a fixed point is reached.
    passes::SequenceFusion sf_pass;
    passes::DeadCFGElimination dce_pass;
    bool applies = false;
    do {
        applies = false;
        applies |= dce_pass.run(builder, analysis_manager);
        applies |= sf_pass.run(builder, analysis_manager);
    } while (applies);
}
598

599
/// Serializes this transformation into a JSON description that
/// from_json() can reconstruct: the loop and access-node element ids
/// plus the container name.
///
/// @param j Output JSON object (keys "subgraph", "transformation_type",
///          "container" are set).
/// @throws std::runtime_error if the loop is neither a For nor a Map.
void InLocalStorage::to_json(nlohmann::json& j) const {
    // Determine the serialized loop kind; only For and Map are supported.
    std::string loop_type;
    if (dynamic_cast<structured_control_flow::For*>(&loop_) != nullptr) {
        loop_type = "for";
    } else if (dynamic_cast<structured_control_flow::Map*>(&loop_) != nullptr) {
        loop_type = "map";
    } else {
        throw std::runtime_error("Unsupported loop type for serialization of loop: " + loop_.indvar()->get_name());
    }

    nlohmann::json loop_desc = {{"element_id", this->loop_.element_id()}, {"type", loop_type}};
    nlohmann::json access_desc = {{"element_id", this->access_node_.element_id()}, {"type", "access_node"}};
    j["subgraph"] = {{"0", loop_desc}, {"1", access_desc}};

    j["transformation_type"] = this->name();
    j["container"] = container_;
}
615

616
/// Deserializes an InLocalStorage transformation from a JSON description
/// produced by to_json(): resolves the loop and access-node element ids
/// against the builder's SDFG.
///
/// Fix: JSON fields are now read uniformly with .at() (the original mixed
/// operator[] and .at(), giving inconsistent errors on malformed input), and
/// each element id is extracted once instead of being re-parsed from the JSON
/// up to three times.
///
/// @param builder Builder whose SDFG contains the referenced elements.
/// @param desc    JSON description with "subgraph"/"0" (loop) and
///                "subgraph"/"1" (access node).
/// @return Reconstructed transformation.
/// @throws InvalidTransformationDescriptionException if an element id cannot
///         be resolved or has the wrong type.
InLocalStorage InLocalStorage::from_json(builder::StructuredSDFGBuilder& builder, const nlohmann::json& desc) {
    auto loop_id = desc.at("subgraph").at("0").at("element_id").get<size_t>();
    auto element = builder.find_element_by_id(loop_id);
    if (!element) {
        throw InvalidTransformationDescriptionException("Element with ID " + std::to_string(loop_id) + " not found.");
    }
    auto loop = dynamic_cast<structured_control_flow::StructuredLoop*>(element);
    if (!loop) {
        throw InvalidTransformationDescriptionException(
            "Element with ID " + std::to_string(loop_id) + " is not a structured loop."
        );
    }

    auto access_node_id = desc.at("subgraph").at("1").at("element_id").get<size_t>();
    auto access_node = dynamic_cast<data_flow::AccessNode*>(builder.find_element_by_id(access_node_id));
    if (!access_node) {
        throw InvalidTransformationDescriptionException(
            "Access node with ID " + std::to_string(access_node_id) + " not found."
        );
    }

    return InLocalStorage(*loop, *access_node);
}
640

641
} // namespace transformations
642
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc