• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 25220149405

01 May 2026 03:23PM UTC coverage: 64.643% (+0.1%) from 64.532%
25220149405

push

github

web-flow
Merge pull request #695 from daisytuner/local-storage-indices

fixes subset updates for local storage transformations

70 of 84 new or added lines in 2 files covered. (83.33%)

2 existing lines in 1 file now uncovered.

31329 of 48465 relevant lines covered (64.64%)

1395.44 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.3
/opt/src/transformations/in_local_storage.cpp
1
#include "sdfg/transformations/in_local_storage.h"
2

3
#include <cstddef>
4
#include <functional>
5
#include <string>
6

7
#include "sdfg/analysis/memory_layout_analysis.h"
8
#include "sdfg/analysis/scope_analysis.h"
9
#include "sdfg/analysis/users.h"
10
#include "sdfg/builder/structured_sdfg_builder.h"
11
#include "sdfg/data_flow/access_node.h"
12
#include "sdfg/data_flow/library_nodes/barrier_local_node.h"
13
#include "sdfg/data_flow/memlet.h"
14
#include "sdfg/passes/structured_control_flow/dead_cfg_elimination.h"
15
#include "sdfg/passes/structured_control_flow/sequence_fusion.h"
16
#include "sdfg/structured_control_flow/if_else.h"
17
#include "sdfg/structured_control_flow/sequence.h"
18
#include "sdfg/structured_control_flow/structured_loop.h"
19
#include "sdfg/symbolic/symbolic.h"
20
#include "sdfg/targets/gpu/gpu_schedule_type.h"
21
#include "sdfg/types/array.h"
22
#include "sdfg/types/pointer.h"
23
#include "sdfg/types/scalar.h"
24

25
namespace sdfg {
26
namespace transformations {
27

28
/// Constructs an InLocalStorage transformation.
///
/// @param loop          Loop whose body reads of the container should be served
///                      from a local buffer instead of the original container.
/// @param access_node   Access node whose data name selects the container to
///                      localize (captured as `container_`).
/// @param storage_type  Storage class of the local buffer; `is_nv_shared()`
///                      selects the GPU cooperative copy path in apply(),
///                      anything else the sequential CPU path.
InLocalStorage::InLocalStorage(
    structured_control_flow::StructuredLoop& loop,
    const data_flow::AccessNode& access_node,
    const types::StorageType& storage_type
)
    : loop_(loop), access_node_(access_node), container_(access_node.data()), storage_type_(storage_type) {}
34

35
/// Returns the identifier used to tag this transformation in serialized form.
std::string InLocalStorage::name() const {
    return "InLocalStorage";
}
36

37
/// Checks whether the container read inside `loop_` can be materialized in a
/// local buffer, and caches the tile geometry needed by apply().
///
/// Side effect: resets and fills `tile_info_` (dimensions, bases, strides,
/// offset); apply() relies on this state, so call can_be_applied() first.
///
/// @param builder           Builder owning the SDFG under transformation.
/// @param analysis_manager  Supplies Users, MemoryLayoutAnalysis and (on the
///                          GPU path) ScopeAnalysis results.
/// @return true iff every criterion below holds.
bool InLocalStorage::can_be_applied(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& sdfg = builder.subject();
    auto& body = this->loop_.root();

    // Start from a clean slate so stale info from a previous check cannot leak.
    tile_info_ = TileInfo{};

    // Criterion: Container must exist
    if (!sdfg.exists(this->container_)) {
        return false;
    }

    auto& type = sdfg.type(this->container_);

    // Criterion: Container must be Array or Pointer (not Scalar)
    if (type.type_id() != types::TypeID::Pointer && type.type_id() != types::TypeID::Array) {
        return false;
    }

    // Criterion: Container must be used in the loop body
    auto& users = analysis_manager.get<analysis::Users>();
    analysis::UsersView body_users(users, body);
    if (body_users.uses(this->container_).empty()) {
        return false;
    }

    // Criterion: Container must be read-only within the loop (no writes)
    // (apply() never copies the buffer back, so writes would be lost)
    if (!body_users.writes(this->container_).empty()) {
        return false;
    }

    // Use MemoryLayoutAnalysis tile API
    auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();
    auto* tile = mla.tile(loop_, this->container_);
    if (!tile) {
        return false;
    }

    // Get overapproximated extents (integer upper bounds)
    auto extents = tile->extents_approx();
    if (extents.empty()) {
        return false;
    }

    // Store tile info (before substitution, bases/strides stay symbolic)
    tile_info_.dimensions = extents;
    tile_info_.bases = tile->min_subset;
    tile_info_.strides =
        std::vector<symbolic::Expression>(tile->layout.strides().begin(), tile->layout.strides().end());
    tile_info_.offset = tile->layout.offset();

    // GPU shared memory: resolve symbolic extents using GPU block sizes and
    // require at least one cooperative dimension
    if (storage_type_.is_nv_shared()) {
        auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();
        auto ancestors = scope_analysis.ancestor_scopes(&loop_);

        // Build substitution map: symbolic GPU map bounds → integer block sizes
        // E.g., Map condition "i < N" with block_size=32 → N=32
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                auto block_size = gpu::gpu_block_size(ancestor_map->schedule_type());
                // Extract symbolic bound from condition: Lt(indvar, BOUND)
                auto condition = ancestor_map->condition();
                if (SymEngine::is_a<SymEngine::StrictLessThan>(*condition)) {
                    auto stl = SymEngine::rcp_static_cast<const SymEngine::StrictLessThan>(condition);
                    auto rhs = stl->get_args()[1];
                    // iter_count = BOUND - init; NOTE(review): subs() below replaces
                    // this whole subexpression — assumes it appears verbatim inside
                    // the extents/bases. Confirm against symbolic::subs semantics.
                    auto iter_count = symbolic::sub(rhs, ancestor_map->init());
                    if (!SymEngine::is_a<SymEngine::Integer>(*iter_count)) {
                        // Symbolic bound — substitute with block size in extents and bases
                        for (auto& ext : tile_info_.dimensions) {
                            ext = symbolic::simplify(symbolic::subs(ext, iter_count, block_size));
                        }
                        for (auto& base : tile_info_.bases) {
                            base = symbolic::simplify(symbolic::subs(base, iter_count, block_size));
                        }
                    }
                }
            }
        }

        // Also resolve the loop's own bound if symbolic and matches a block size
        // E.g., For k = 0..K where K is a parameter — check if K can be resolved
        // from any GPU ancestor map
        // (Already handled above: if K appears as a GPU map bound, it's substituted)

        // Criterion: All extents must now be provably integer
        // (a shared-memory buffer needs a compile-time size)
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }

        // Criterion: At least one cooperative dimension
        bool has_cooperative_dim = false;
        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                // A GPU dim is cooperative if its indvar does NOT appear in any tile base
                bool appears_in_bases = false;
                for (auto& base : tile_info_.bases) {
                    if (symbolic::uses(base, ancestor_map->indvar())) {
                        appears_in_bases = true;
                        break;
                    }
                }
                if (!appears_in_bases) {
                    has_cooperative_dim = true;
                    break;
                }
            }
        }
        if (!has_cooperative_dim) {
            return false;
        }
    } else {
        // CPU path: All extents must be provably integer
        for (auto& ext : tile_info_.dimensions) {
            if (!SymEngine::is_a<SymEngine::Integer>(*ext)) {
                return false;
            }
        }
    }

    return true;
}
167

168
/// Materializes the tile of `container_` read inside `loop_` in a local buffer
/// and rewrites all reads in the loop body to target that buffer.
///
/// Precondition: can_be_applied() returned true (it fills `tile_info_`, which
/// this method consumes).
///
/// Emitted structure:
///   - GPU (NV shared) path: barrier → cooperative flat copy loop → barrier,
///     all inserted before `loop_`.
///   - CPU path: one sequential copy map per varying tile dimension, inserted
///     before `loop_`.
/// Afterwards every access to `container_` inside the loop body is renamed to
/// the local buffer and its memlet subsets are re-linearized.
///
/// @param builder           Builder owning the SDFG; mutated in place.
/// @param analysis_manager  Invalidated at the end (CFG was restructured).
/// @throws InvalidSDFGException if the loop's parent scope is not a Sequence.
void InLocalStorage::apply(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
    auto& sdfg = builder.subject();
    auto& scope_analysis = analysis_manager.get<analysis::ScopeAnalysis>();

    auto parent_node = scope_analysis.parent_scope(&loop_);
    auto parent = dynamic_cast<structured_control_flow::Sequence*>(parent_node);
    if (!parent) {
        throw InvalidSDFGException("InLocalStorage: Parent of loop must be a Sequence!");
    }

    // Get type information
    auto& type = sdfg.type(this->container_);
    types::Scalar scalar_type(type.primitive_type());

    // Create local buffer name
    local_name_ = "__daisy_in_local_storage_" + this->container_;

    // Collect varying dimensions (extent > 1) and compute buffer layout;
    // extent-1 dimensions are dropped from the buffer entirely.
    std::vector<size_t> varying_dims;
    std::vector<symbolic::Expression> dim_sizes;
    for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
        auto& dim_size = tile_info_.dimensions.at(d);
        if (!symbolic::eq(dim_size, symbolic::integer(1))) {
            varying_dims.push_back(d);
            dim_sizes.push_back(dim_size);
        }
    }

    // Compute total buffer size (product of varying extents)
    symbolic::Expression total_size = symbolic::integer(1);
    for (auto& ds : dim_sizes) {
        total_size = symbolic::mul(total_size, ds);
    }

    // Helper: build linearized local index from per-dimension symbolic expressions
    // (row-major over dim_sizes: last dimension has stride 1)
    auto linearize_exprs = [&](const std::vector<symbolic::Expression>& indices) -> symbolic::Expression {
        symbolic::Expression linear_idx = symbolic::integer(0);
        symbolic::Expression stride = symbolic::integer(1);
        for (int i = indices.size() - 1; i >= 0; i--) {
            linear_idx = symbolic::add(linear_idx, symbolic::mul(indices[i], stride));
            stride = symbolic::mul(stride, dim_sizes[i]);
        }
        return linear_idx;
    };

    // Helper: build linearized local index from per-dimension indvars (symbols)
    auto linearize = [&](const std::vector<symbolic::Symbol>& indvars) -> symbolic::Expression {
        std::vector<symbolic::Expression> exprs(indvars.begin(), indvars.end());
        return linearize_exprs(exprs);
    };

    // Helper: build source subset (base[d] + copy_indvar[d]) for original container.
    // Pointers get a single flattened index via tile strides/offset; arrays keep
    // one index per dimension.
    bool is_pointer = (type.type_id() == types::TypeID::Pointer);
    auto build_original_subset = [&](const std::vector<symbolic::Expression>& copy_indices) -> data_flow::Subset {
        std::vector<symbolic::Expression> full_indices;
        size_t var_idx = 0;
        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                full_indices.push_back(symbolic::add(tile_info_.bases.at(d), copy_indices.at(var_idx++)));
            } else {
                full_indices.push_back(tile_info_.bases.at(d));
            }
        }

        if (is_pointer) {
            symbolic::Expression linear = tile_info_.offset;
            for (size_t d = 0; d < full_indices.size(); d++) {
                linear = symbolic::add(linear, symbolic::mul(tile_info_.strides.at(d), full_indices.at(d)));
            }
            return {linear};
        } else {
            return data_flow::Subset(full_indices.begin(), full_indices.end());
        }
    };

    // ==================================================================
    // Branch: GPU cooperative path vs CPU sequential path
    // ==================================================================
    if (storage_type_.is_nv_shared()) {
        // ============================================================
        // GPU COOPERATIVE PATH
        // ============================================================
        auto ancestors = scope_analysis.ancestor_scopes(&loop_);

        // Collect cooperative GPU dimensions (indvar not in tile bases)
        struct CoopDim {
            symbolic::Symbol indvar;
            symbolic::Integer block_size;
            gpu::GPUDimension dimension;
        };
        std::vector<CoopDim> coop_dims;

        for (auto* node : ancestors) {
            if (auto* ancestor_map = dynamic_cast<structured_control_flow::Map*>(node)) {
                if (!gpu::is_gpu_schedule(ancestor_map->schedule_type())) {
                    continue;
                }
                bool appears_in_bases = false;
                for (auto& base : tile_info_.bases) {
                    if (symbolic::uses(base, ancestor_map->indvar())) {
                        appears_in_bases = true;
                        break;
                    }
                }
                if (!appears_in_bases) {
                    coop_dims.push_back(
                        {ancestor_map->indvar(),
                         gpu::gpu_block_size(ancestor_map->schedule_type()),
                         gpu::gpu_dimension(ancestor_map->schedule_type())}
                    );
                }
            }
        }

        // Compute total cooperative thread count
        symbolic::Expression total_coop_threads = symbolic::integer(1);
        for (auto& cd : coop_dims) {
            total_coop_threads = symbolic::mul(total_coop_threads, cd.block_size);
        }

        // Create the local buffer with NV_Shared storage
        types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_container(local_name_, buffer_type);

        // Emit: barrier → guarded cooperative copy → barrier → loop
        // 1. Barrier before copy
        auto& barrier_block1 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
        builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block1, {});

        // 2. Cooperative copy with if_else guard
        // Flatten cooperative thread index: coop_flat = sum(indvar[i] * product(block_size[j] for j>i))
        symbolic::Expression coop_flat = symbolic::integer(0);
        symbolic::Expression coop_stride = symbolic::integer(1);
        for (int i = coop_dims.size() - 1; i >= 0; i--) {
            coop_flat = symbolic::add(coop_flat, symbolic::mul(coop_dims[i].indvar, coop_stride));
            coop_stride = symbolic::mul(coop_stride, coop_dims[i].block_size);
        }

        // Each thread loads elements strided by total_coop_threads
        // Thread t loads elements: t, t + total_threads, t + 2*total_threads, ...
        // We emit a loop: for (idx = coop_flat; idx < total_size; idx += total_coop_threads)
        auto idx_name = "__daisy_ils_coop_" + this->container_;
        types::Scalar idx_type(types::PrimitiveType::UInt64);
        builder.add_container(idx_name, idx_type);
        auto idx_var = symbolic::symbol(idx_name);

        auto copy_init = coop_flat;
        auto copy_condition = symbolic::Lt(idx_var, total_size);
        auto copy_update = symbolic::add(idx_var, total_coop_threads);

        auto& copy_loop = builder.add_map_before(
            *parent,
            loop_,
            idx_var,
            copy_condition,
            copy_init,
            copy_update,
            structured_control_flow::ScheduleType_Sequential::create(),
            {},
            loop_.debug_info()
        );

        // Decompose flat idx back into per-dimension indices for source subset
        // idx maps to varying_dims in row-major order
        auto& copy_scope = copy_loop.root();
        auto& copy_block = builder.add_block(copy_scope);
        auto& copy_src = builder.add_access(copy_block, this->container_);
        auto& copy_dst = builder.add_access(copy_block, local_name_);
        auto& copy_tasklet = builder.add_tasklet(copy_block, data_flow::TaskletCode::assign, "_out", {"_in"});

        // Decompose idx_var into per-dim indices
        std::vector<symbolic::Expression> copy_indices;
        symbolic::Expression remainder = idx_var;
        for (size_t i = 0; i < dim_sizes.size(); i++) {
            if (i < dim_sizes.size() - 1) {
                // integer division: idx / (product of remaining dims)
                symbolic::Expression divisor = symbolic::integer(1);
                for (size_t j = i + 1; j < dim_sizes.size(); j++) {
                    divisor = symbolic::mul(divisor, dim_sizes[j]);
                }
                auto quotient = symbolic::div(remainder, divisor);
                copy_indices.push_back(quotient);
                remainder = symbolic::mod(remainder, divisor);
            } else {
                copy_indices.push_back(remainder);
            }
        }

        auto copy_src_subset = build_original_subset(copy_indices);
        data_flow::Subset copy_dst_subset = {idx_var};

        builder.add_computational_memlet(copy_block, copy_src, copy_tasklet, "_in", copy_src_subset, type);
        builder.add_computational_memlet(copy_block, copy_tasklet, "_out", copy_dst, copy_dst_subset, buffer_type);

        // 3. Barrier after copy
        auto& barrier_block2 = builder.add_block_before(*parent, loop_, {}, loop_.debug_info());
        builder.add_library_node<data_flow::BarrierLocalNode>(barrier_block2, {});
    } else {
        // ============================================================
        // CPU SEQUENTIAL PATH
        // ============================================================
        // Create the local buffer with specified storage type
        types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_container(local_name_, buffer_type);

        // Build one nested sequential copy map per varying dimension; the first
        // is inserted before `loop_`, the rest nest inside it.
        std::vector<symbolic::Symbol> copy_indvars;
        structured_control_flow::Sequence* copy_scope = parent;
        bool first_copy_loop = true;

        for (size_t i = 0; i < varying_dims.size(); i++) {
            size_t d = varying_dims[i];
            auto indvar_name = "__daisy_ils_" + this->container_ + "_d" + std::to_string(d);
            types::Scalar indvar_type(types::PrimitiveType::UInt64);
            builder.add_container(indvar_name, indvar_type);
            auto indvar = symbolic::symbol(indvar_name);
            copy_indvars.push_back(indvar);

            auto init = symbolic::integer(0);
            auto condition = symbolic::Lt(indvar, dim_sizes[i]);
            auto update = symbolic::add(indvar, symbolic::integer(1));

            if (first_copy_loop) {
                auto& copy_loop = builder.add_map_before(
                    *copy_scope,
                    loop_,
                    indvar,
                    condition,
                    init,
                    update,
                    structured_control_flow::ScheduleType_Sequential::create(),
                    {},
                    loop_.debug_info()
                );
                copy_scope = &copy_loop.root();
                first_copy_loop = false;
            } else {
                auto& copy_loop = builder.add_map(
                    *copy_scope,
                    indvar,
                    condition,
                    init,
                    update,
                    structured_control_flow::ScheduleType_Sequential::create(),
                    {},
                    loop_.debug_info()
                );
                copy_scope = &copy_loop.root();
            }
        }

        // Create copy block
        auto& copy_block = builder.add_block(*copy_scope);
        auto& copy_src = builder.add_access(copy_block, this->container_);
        auto& copy_dst = builder.add_access(copy_block, local_name_);
        auto& copy_tasklet = builder.add_tasklet(copy_block, data_flow::TaskletCode::assign, "_out", {"_in"});

        std::vector<symbolic::Expression> copy_exprs(copy_indvars.begin(), copy_indvars.end());
        auto copy_src_subset = build_original_subset(copy_exprs);
        data_flow::Subset copy_dst_subset = {linearize(copy_indvars)};

        builder.add_computational_memlet(copy_block, copy_src, copy_tasklet, "_in", copy_src_subset, type);
        types::Array buffer_type_ref(storage_type_, 0, {}, scalar_type, total_size);
        builder.add_computational_memlet(copy_block, copy_tasklet, "_out", copy_dst, copy_dst_subset, buffer_type_ref);
    }

    // ==================================================================
    // Update accesses in the main loop to use the local buffer
    // ==================================================================
    types::Array buffer_type(storage_type_, 0, {}, scalar_type, total_size);
    auto& mla = analysis_manager.get<analysis::MemoryLayoutAnalysis>();

    // Recursive helper to traverse all blocks in the loop body
    std::function<void(structured_control_flow::ControlFlowNode&)> rewrite_accesses;
    rewrite_accesses = [&](structured_control_flow::ControlFlowNode& node) {
        if (auto* block = dynamic_cast<structured_control_flow::Block*>(&node)) {
            auto& dfg = block->dataflow();
            for (auto* access : dfg.data_nodes()) {
                if (access->data() != this->container_) continue;
                // Rewrite outgoing memlets (reads from this access node):
                // shift by the tile base per varying dimension, then linearize.
                for (auto& memlet : dfg.out_edges(*access)) {
                    auto* acc = mla.access(memlet);
                    if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
                        std::vector<symbolic::Expression> local_indices;
                        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
                            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                                local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
                            }
                        }
                        symbolic::Expression linear_idx = linearize_exprs(local_indices);
                        memlet.set_subset({linear_idx});
                        memlet.set_base_type(buffer_type);
                    }
                }
                // Rewrite incoming memlets (writes to this access node)
                // NOTE(review): can_be_applied() rejects containers with writes
                // in the loop body, so this loop is expected to be dead here.
                for (auto& memlet : dfg.in_edges(*access)) {
                    auto* acc = mla.access(memlet);
                    if (acc && acc->subset.size() == tile_info_.dimensions.size()) {
                        std::vector<symbolic::Expression> local_indices;
                        for (size_t d = 0; d < tile_info_.dimensions.size(); d++) {
                            if (!symbolic::eq(tile_info_.dimensions.at(d), symbolic::integer(1))) {
                                local_indices.push_back(symbolic::sub(acc->subset.at(d), tile_info_.bases.at(d)));
                            }
                        }
                        symbolic::Expression linear_idx = linearize_exprs(local_indices);
                        memlet.set_subset({linear_idx});
                        memlet.set_base_type(buffer_type);
                    }
                }
                // Rename the access node to the local buffer
                access->data(local_name_);
            }
        } else if (auto* seq = dynamic_cast<structured_control_flow::Sequence*>(&node)) {
            for (size_t i = 0; i < seq->size(); i++) {
                rewrite_accesses(seq->at(i).first);
            }
        } else if (auto* loop = dynamic_cast<structured_control_flow::StructuredLoop*>(&node)) {
            rewrite_accesses(loop->root());
        } else if (auto* if_else = dynamic_cast<structured_control_flow::IfElse*>(&node)) {
            for (size_t i = 0; i < if_else->size(); i++) {
                rewrite_accesses(if_else->at(i).first);
            }
        }
    };
    rewrite_accesses(loop_.root());

    // Cleanup: the CFG changed structurally, so all cached analyses are stale.
    analysis_manager.invalidate_all();

    passes::SequenceFusion sf_pass;
    passes::DeadCFGElimination dce_pass;
    bool applies = false;
    do {
        applies = false;
        applies |= dce_pass.run(builder, analysis_manager);
        applies |= sf_pass.run(builder, analysis_manager);
    } while (applies);
}
505

506
/// Serializes this transformation: the loop and access node element IDs under
/// "subgraph", plus the transformation type and container name.
/// @throws std::runtime_error if the loop is neither a For nor a Map.
void InLocalStorage::to_json(nlohmann::json& j) const {
    // Determine the concrete loop kind; only For and Map are serializable.
    std::string loop_type;
    if (dynamic_cast<structured_control_flow::For*>(&loop_) != nullptr) {
        loop_type = "for";
    } else if (dynamic_cast<structured_control_flow::Map*>(&loop_) != nullptr) {
        loop_type = "map";
    } else {
        throw std::runtime_error("Unsupported loop type for serialization of loop: " + loop_.indvar()->get_name());
    }

    // Build the two subgraph entries separately, then assemble them.
    nlohmann::json loop_entry = {{"element_id", this->loop_.element_id()}, {"type", loop_type}};
    nlohmann::json access_entry = {{"element_id", this->access_node_.element_id()}, {"type", "access_node"}};
    j["subgraph"] = {{"0", loop_entry}, {"1", access_entry}};

    j["transformation_type"] = this->name();
    j["container"] = container_;
}
522

523
/// Reconstructs an InLocalStorage transformation from its JSON description.
///
/// Expects desc["subgraph"]["0"/"1"]["element_id"] to identify the loop and
/// the access node, respectively.
///
/// @param builder  Builder used to resolve element IDs to IR elements.
/// @param desc     JSON produced by to_json().
/// @return the reconstructed transformation (default storage type).
/// @throws InvalidTransformationDescriptionException if an element is missing
///         or has the wrong type.
/// @throws nlohmann::json::exception if the description lacks required keys.
InLocalStorage InLocalStorage::from_json(builder::StructuredSDFGBuilder& builder, const nlohmann::json& desc) {
    // Use at() throughout: const operator[] on nlohmann::json does not insert
    // and has undefined behavior for missing keys, whereas at() throws.
    auto loop_id = desc.at("subgraph").at("0").at("element_id").get<size_t>();
    auto element = builder.find_element_by_id(loop_id);
    if (!element) {
        throw InvalidTransformationDescriptionException("Element with ID " + std::to_string(loop_id) + " not found.");
    }
    auto loop = dynamic_cast<structured_control_flow::StructuredLoop*>(element);
    if (!loop) {
        throw InvalidTransformationDescriptionException(
            "Element with ID " + std::to_string(loop_id) + " is not a structured loop."
        );
    }

    // Resolve the access node, distinguishing "not found" from "wrong type"
    // (mirrors the loop handling above).
    auto access_id = desc.at("subgraph").at("1").at("element_id").get<size_t>();
    auto* access_element = builder.find_element_by_id(access_id);
    if (!access_element) {
        throw InvalidTransformationDescriptionException(
            "Access node with ID " + std::to_string(access_id) + " not found."
        );
    }
    auto access_node = dynamic_cast<data_flow::AccessNode*>(access_element);
    if (!access_node) {
        throw InvalidTransformationDescriptionException(
            "Element with ID " + std::to_string(access_id) + " is not an access node."
        );
    }

    return InLocalStorage(*loop, *access_node);
}
547

548
} // namespace transformations
549
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc