• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 24474374464

15 Apr 2026 07:36PM UTC coverage: 64.633%. First build
24474374464

push

github

web-flow
Merge pull request #682 from daisytuner/read-only-offloads-reuse

Read only offloads reuse

125 of 138 new or added lines in 5 files covered. (90.58%)

30803 of 47658 relevant lines covered (64.63%)

582.23 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.93
/sdfg/src/passes/dataflow/dead_data_elimination.cpp
1
#include "sdfg/passes/dataflow/dead_data_elimination.h"
2

3
#include "sdfg/analysis/base_user_visitor.h"
4
#include "sdfg/analysis/data_dependency_analysis.h"
5
#include "sdfg/analysis/pointer_analyzers.h"
6
#include "sdfg/data_flow/library_nodes/stdlib/free.h"
7
#include "sdfg/data_flow/library_nodes/stdlib/malloc.h"
8
#include "sdfg/targets/offloading/data_offloading_node.h"
9
#include "sdfg/visitor/structured_sdfg_visitor.h"
10
#include "sdfg/visualizer/dot_visualizer.h"
11

12
namespace sdfg {
13
namespace passes {
14

15
// Default construction keeps the legacy container-removal sweep enabled.
// Delegates to the bool overload so the member initialization lives in one place.
DeadDataElimination::DeadDataElimination() : DeadDataElimination(true) {}
16

17
/// Construct the pass, selecting whether the legacy whole-container removal
/// sweep (Users/DataDependencyAnalysis based) runs in addition to the
/// ownership-based elimination.
DeadDataElimination::DeadDataElimination(bool legacy_removals)
    : Pass(), legacy_removals_(legacy_removals) {}
18

19
/// Pass identifier used for registration and debug output.
std::string DeadDataElimination::name() {
    static const std::string kPassName = "DeadDataElimination";
    return kPassName;
}
20

21
/**
22
 * Simple escape policy that collects all escape and overwrite events
23
 * into a map of container -> {element -> type} entries.
24
 */
25
class BlockerListPolicy {
26
public:
27
    enum class BlockerType { Escape, Overwrite };
28

29
    using BlockerMap = std::unordered_map<std::string, std::unordered_map<const Element*, BlockerType>>;
30

31
protected:
32
    BlockerMap blockers_;
33

34
public:
35
    void on_escape(const std::string& container, const ControlFlowNode* node, const Element* user) {
43✔
36
        blockers_[container].emplace(user, BlockerType::Escape);
43✔
37
    }
43✔
38

39
    void on_overwrite(const std::string& container, const ControlFlowNode* node, const Element* user) {
29✔
40
        blockers_[container].emplace(user, BlockerType::Overwrite);
29✔
41
    }
29✔
42
};
43

44
/**
45
 * Finds memory areas (heap for now) that are wholly owned by the surrounding function. Owned memory can be removed if
46
 * its no longer used, writes to it can be ellided if the data is never read. This is not true for memory writes in
47
 * general, as you must prove no reference to that data ever escapes our control
48
 */
49
class MemoryOwnershipAnalysis : public analysis::BaseUserVisitor,
50
                                analysis::PointerEscapeAnalyzer<BlockerListPolicy>,
51
                                analysis::PointerOverwriteAnalyzer<BlockerListPolicy>,
52
                                BlockerListPolicy {
53
    struct FreeCluster {
54
        const Block* block;
55
        const data_flow::Memlet* in;
56
        const data_flow::Memlet* out;
57

58
        FreeCluster(const Block* b, const data_flow::Memlet* i, const data_flow::Memlet* o) : block(b), in(i), out(o) {}
4✔
59
    };
60

61
    struct OwnedArea {
62
        data_flow::Memlet* producer;
63
        structured_control_flow::Block* producer_block;
64
        symbolic::Expression allocation_size;
65
        bool non_ssa = false;
66
        std::vector<FreeCluster> free_clusters;
67

68
        OwnedArea(data_flow::Memlet* p, structured_control_flow::Block* pb, symbolic::Expression as, bool ns)
69
            : producer(p), producer_block(pb), allocation_size(std::move(as)), non_ssa(ns) {}
13✔
70

71
        void remove_from(builder::StructuredSDFGBuilder& builder) const;
72
    };
73

74
private:
75
    StructuredSDFG& sdfg_;
76
    // memory that is allocated by us and therefore 'owned' until it escapes
77
    std::unordered_map<std::string, OwnedArea> originally_owned_data_;
78
    std::unordered_set<std::string> fully_owned_; // never escaped
79

80
public:
81
    MemoryOwnershipAnalysis(StructuredSDFG& sdfg);
82

83
    void run(analysis::AnalysisManager& manager);
84

85
    bool visit(sdfg::structured_control_flow::Block& node) override;
86

87
    void use_as_symbol_write(
88
        const symbolic::Symbol& container, const ControlFlowNode* node, const Element* user, SymbolWriteLocation loc
89
    ) override {
281✔
90
        PointerEscapeAnalyzer::use_as_symbol_write(container, node, user, loc);
281✔
91
        PointerOverwriteAnalyzer::use_as_symbol_write(container, node, user, loc);
281✔
92
    }
281✔
93
    void use_as_symbol_read(
94
        const std::string& container,
95
        const ControlFlowNode* node,
96
        const Element* user,
97
        SymbolReadLocation loc,
98
        int loc_index,
99
        symbolic::Expression expr
100
    ) override {
1,892✔
101
        PointerEscapeAnalyzer::use_as_symbol_read(container, node, user, loc, loc_index, std::move(expr));
1,892✔
102
        PointerOverwriteAnalyzer::use_as_symbol_read(container, node, user, loc, loc_index, std::move(expr));
1,892✔
103
    }
1,892✔
104
    void use_as_src_node(
105
        const std::string& container,
106
        const data_flow::AccessNode& node,
107
        const data_flow::Memlet& edge,
108
        const Block& block
109
    ) override {
570✔
110
        PointerEscapeAnalyzer::use_as_src_node(container, node, edge, block);
570✔
111
        PointerOverwriteAnalyzer::use_as_src_node(container, node, edge, block);
570✔
112
    }
570✔
113
    void use_as_dst_node(
114
        const std::string& container,
115
        const data_flow::AccessNode& node,
116
        const data_flow::Memlet& edge,
117
        const Block& block
118
    ) override {
345✔
119
        PointerEscapeAnalyzer::use_as_dst_node(container, node, edge, block);
345✔
120
        PointerOverwriteAnalyzer::use_as_dst_node(container, node, edge, block);
345✔
121
    }
345✔
122
    void use_as_return_src(const std::string& container, const Return& ret) override {
8✔
123
        PointerEscapeAnalyzer::use_as_return_src(container, ret);
8✔
124
        PointerOverwriteAnalyzer::use_as_return_src(container, ret);
8✔
125
    }
8✔
126

127
    const std::unordered_set<std::string>& fully_owned_areas() const { return fully_owned_; }
101✔
128

129
    const OwnedArea& owned_area(const std::string& container) const { return originally_owned_data_.at(container); }
4✔
130

131
private:
132
    static bool excusedEscape(const Element* element, const OwnedArea& area);
133
    static bool excusedOverwrite(const Element* element, const OwnedArea& area);
134
};
135

136
// Deletes the malloc library node and every attached free() cluster for this
// owned area. Once the allocation is gone, its frees are dead too.
// (Removed the unused local for producer->dst(); it was only referenced from
// the commented-out clear_node variant below.)
void MemoryOwnershipAnalysis::OwnedArea::remove_from(builder::StructuredSDFGBuilder& builder) const {
    auto& malloc_node = this->producer->src();
    builder.clear_code_node_legacy(*this->producer_block, dynamic_cast<const data_flow::CodeNode&>(malloc_node));
    // builder.clear_node(
    //     *this->producer_block, dynamic_cast<data_flow::AccessNode&>(this->producer->dst()),
    //     {&this->producer->dst(), &malloc_node}
    // );

    for (auto& free_cluster : this->free_clusters) {
        auto& memlet = *free_cluster.out;
        builder.clear_code_node_legacy(
            *const_cast<Block*>(free_cluster.block), dynamic_cast<const data_flow::CodeNode&>(memlet.src())
        );
        // builder.clear_node(*const_cast<Block*>(free_cluster.block), memlet.dst(), {&memlet.dst(), &memlet.src()});
    }
}
152

153
// Bases are constructed before members regardless of mem-initializer order;
// list them first so the list matches the actual initialization order
// (silences -Wreorder and avoids misleading readers).
// NOTE(review): *this is handed to the analyzer bases while the object is still
// under construction — they must only store the reference, not use it yet.
MemoryOwnershipAnalysis::MemoryOwnershipAnalysis(StructuredSDFG& sdfg)
    : PointerEscapeAnalyzer(sdfg, *this), PointerOverwriteAnalyzer(sdfg, *this), sdfg_(sdfg) {}
155

156
bool MemoryOwnershipAnalysis::excusedEscape(const Element* element, const OwnedArea& area) {
12✔
157
    // An escape is excused if it matches the input edge of one of the free_clusters.
158
    // Reading the pointer to pass it to free() is not a real escape.
159
    for (const auto& cluster : area.free_clusters) {
12✔
160
        if (element == cluster.in) {
6✔
161
            return true;
4✔
162
        }
4✔
163
    }
6✔
164

165
    auto* memlet = dynamic_cast<const data_flow::Memlet*>(element);
8✔
166
    if (memlet) {
8✔
167
        auto* libNode = dynamic_cast<const data_flow::LibraryNode*>(&memlet->dst());
6✔
168
        if (libNode) {
6✔
169
            auto conns = libNode->inputs();
3✔
170
            auto idx = std::find(conns.begin(), conns.end(), memlet->dst_conn()) - conns.begin();
3✔
171
            auto access_type = libNode->pointer_access_type(idx);
3✔
172
            auto maybe_rd_only = std::get_if<data_flow::PointerReadOnly>(&access_type);
3✔
173
            if (maybe_rd_only && maybe_rd_only->no_ptr_escape()) {
3✔
174
                return true;
1✔
175
            }
1✔
176
        }
3✔
177
    }
6✔
178

179
    return false;
7✔
180
}
8✔
181

182
bool MemoryOwnershipAnalysis::excusedOverwrite(const Element* element, const OwnedArea& area) {
14✔
183
    auto* memlet = dynamic_cast<const data_flow::Memlet*>(element);
14✔
184

185
    if (!memlet) {
14✔
186
        return false;
×
187
    }
×
188

189
    // The producer (malloc output edge) is an excused overwrite — it's the initial assignment.
190
    if (element == area.producer) {
14✔
191
        return true;
6✔
192
    }
6✔
193
    // DataOffloadNodes currently have a fake-output edge instead of a pointer input.
194
    // But they can only write to memory, never generate/overwrite the pointer
195
    if (auto* offload = dynamic_cast<const offloading::DataOffloadingNode*>(&memlet->src())) {
8✔
196
        if (offload->transfer_direction() != offloading::DataTransferDirection::NONE) {
4✔
197
            return true;
4✔
198
        }
4✔
199
    }
4✔
200

201
    // The output edge of a free cluster is an excused overwrite — free sets the pointer
202
    // to NULL (a fake overwrite that doesn't represent a meaningful reassignment).
203
    for (const auto& cluster : area.free_clusters) {
4✔
204
        if (element == cluster.out) {
4✔
205
            return true;
4✔
206
        }
4✔
207
    }
4✔
208
    return false;
×
209
}
4✔
210

211

212
/**
 * Classifies each malloc-produced container collected by visit(): a container
 * is "fully owned" if its allocation is SSA-like (exactly one malloc, size not
 * invalidated) and every recorded escape/overwrite blocker is excused
 * (free() clusters, read-only borrows, the producing edge itself).
 *
 * @param manager currently unused; kept for interface symmetry with other analyses.
 */
void MemoryOwnershipAnalysis::run(analysis::AnalysisManager& manager) {
    // One walk over the SDFG: visit() fills originally_owned_data_, while the
    // inherited pointer analyzers fill blockers_ via the BlockerListPolicy callbacks.
    dispatch(sdfg_.root());

    for (auto& [name, area] : originally_owned_data_) {
        // Only single-allocation areas with a still-valid size expression qualify.
        if (!area.non_ssa && area.allocation_size != SymEngine::null) {
            auto it = blockers_.find(name);
            if (it != blockers_.end()) {
                // The first non-excused blocker disqualifies the area.
                bool killed = false;
                for (auto& [element, type] : it->second) {
                    if (type == BlockerType::Escape) {
                        if (!excusedEscape(element, area)) {
                            killed = true;
                            break;
                        }
                    } else if (type == BlockerType::Overwrite) {
                        if (!excusedOverwrite(element, area)) {
                            killed = true;
                            break;
                        }
                    }
                }
                if (!killed) {
                    fully_owned_.insert(name);
                }
            } else {
                // No blocker events were recorded at all: trivially fully owned.
                fully_owned_.insert(name);
            }
        }
    }
}
242

243

244
/**
 * Scans one block's dataflow graph for malloc/free library nodes.
 *  - malloc: registers the written container as an owned area; a second malloc
 *    into the same container demotes it to non-SSA (ownership too ambiguous).
 *  - free: a simple AccessNode -> free -> AccessNode cluster on the same
 *    pointer container is attached to the previously registered area.
 * Always chains to BaseUserVisitor::visit so the generic user walk still runs.
 */
bool MemoryOwnershipAnalysis::visit(sdfg::structured_control_flow::Block& node) {
    auto& dflow = node.dataflow();
    for (auto& library_node : dflow.library_nodes()) {
        if (library_node->code() == stdlib::LibraryNodeType_Malloc) {
            auto* malloc_node = dynamic_cast<const stdlib::MallocNode*>(library_node);
            auto& alloc_size = malloc_node->size();
            auto output_conn = malloc_node->output(0);
            // Only edges leaving through the malloc result connector are of interest.
            auto oedges = dflow.out_edges(*malloc_node) |
                          std::views::filter([&](const auto& e) { return e.src_conn() == output_conn; });
            for (auto& oedge : oedges) {
                auto* access_node = dynamic_cast<data_flow::AccessNode*>(&oedge.dst());
                if (access_node && oedge.is_dst_write()) {
                    auto container = access_node->data();
                    if (sdfg_.is_external(container)) {
                        // was never ours to begin with, even if weird that we run malloc on it
                        continue;
                    }
                    auto it = originally_owned_data_.find(container);
                    if (it != originally_owned_data_.end()) {
                        // Second allocation into the same container: no longer SSA-like,
                        // so invalidate everything recorded about this area.
                        auto& area = it->second;
                        area.non_ssa = true;
                        area.allocation_size = SymEngine::null;
                        area.producer_block = nullptr;
                        area.producer = nullptr;
                        // DEBUG_PRINTLN("Conflicting ownership of " << container);
                        continue;
                    }
                    // First (and hopefully only) allocation into this container.
                    originally_owned_data_.emplace(
                        std::piecewise_construct,
                        std::forward_as_tuple(container),
                        std::forward_as_tuple(&oedge, &node, alloc_size, false)
                    );
                }
            }
        } else if (library_node->code() == stdlib::LibraryNodeType_Free) {
            auto* free_node = dynamic_cast<const stdlib::FreeNode*>(library_node);
            auto input = dflow.in_edge_for_connector(*free_node, free_node->input(0));
            auto outputs = dflow.out_edges_for_connector(*free_node, free_node->output(0));
            if (input && outputs.size() == 1) {
                auto* in_access = dynamic_cast<const data_flow::AccessNode*>(&input->src());
                auto* out_access = dynamic_cast<const data_flow::AccessNode*>(&outputs.at(0)->dst());

                // Only the simple pattern "container -> free -> same container" is recognized.
                if (in_access && out_access && in_access->data() == out_access->data()) {
                    auto& container = in_access->data();
                    if (sdfg_.type(container).type_id() == types::TypeID::Pointer) {
                        auto area_it = originally_owned_data_.find(container);
                        if (area_it != originally_owned_data_.end()) { // we scan in execution order. Malloc needs to
                                                                       // have been found before
                            auto& area = area_it->second;
                            area.free_clusters.emplace_back(&node, input, outputs[0]);
                        }
                    }
                }
            }
        }
    }

    return BaseUserVisitor::visit(node);
}
303

304
/**
305
 * Does not care about other types of accesses.
306
 * Presumes, that the data cannot alias and there is only one SSA-like instance of backing data.
307
 * So that indirect reads and writes can be matched up with each other.
308
 * Any read of the pointer or getting of an address is considered an escape for which aliasing cannot be excluded,
309
 * in which case you must not rely on this analysis
310
 */
311
class IndirectMemoryAccessFinder : public analysis::BaseUserVisitor { // TODO update to use the PointerUsedAnalyzer and
312
                                                                      // a policy that filters the containstarg
313
private:
314
    std::unordered_map<std::string, std::unordered_set<const data_flow::Memlet*>> indirect_reads_;
315
    std::unordered_map<std::string, std::unordered_map<const data_flow::Memlet*, const Block*>> writes_to_remove_;
316
    const std::unordered_set<std::string>& target_containers_;
317

318
public:
319
    IndirectMemoryAccessFinder(const std::unordered_set<std::string>& target_containers);
320

321
    const std::unordered_map<std::string, std::unordered_set<const data_flow::Memlet*>>& indirect_reads() {
6✔
322
        return indirect_reads_;
6✔
323
    }
6✔
324
    const std::unordered_map<std::string, std::unordered_map<const data_flow::Memlet*, const Block*>>& writes_to_remove() {
4✔
325
        return writes_to_remove_;
4✔
326
    }
4✔
327

328
    void use_as_symbol_read(
329
        const std::string& container,
330
        const ControlFlowNode* node,
331
        const Element* user,
332
        SymbolReadLocation loc,
333
        int loc_index,
334
        symbolic::Expression expr
335
    ) override {}
28✔
336
    void use_as_symbol_write(
337
        const symbolic::Symbol& container, const ControlFlowNode* node, const Element* user, SymbolWriteLocation loc
338
    ) override {}
5✔
339
    void use_as_src_node(
340
        const std::string& container,
341
        const data_flow::AccessNode& node,
342
        const data_flow::Memlet& edge,
343
        const Block& block
344
    ) override;
345
    void use_as_dst_node(
346
        const std::string& container,
347
        const data_flow::AccessNode& node,
348
        const data_flow::Memlet& edge,
349
        const Block& block
350
    ) override;
351
    void use_as_return_src(const std::string& container, const Return& ret) override {}
1✔
352
};
353

354
// Stores a reference to [target_containers]; the caller must keep that set
// alive (and unchanged) for the lifetime of this finder.
IndirectMemoryAccessFinder::IndirectMemoryAccessFinder(const std::unordered_set<std::string>& target_containers)
    : target_containers_(target_containers) {}
356

357
// Records uses of [container] where it is the source of a memlet. A dereferencing
// read counts as an indirect read; a plain pointer read is only tolerated when the
// consuming library node declares the connector as read-only and non-escaping.
void IndirectMemoryAccessFinder::use_as_src_node(
    const std::string& container, const data_flow::AccessNode& node, const data_flow::Memlet& edge, const Block& block
) {
    if (target_containers_.contains(container)) {
        if (edge.is_src_pointed_to_read()) {
            indirect_reads_[container].insert(&edge);
        }
        // Library nodes may get a pointer as input. But some of them we know enough about
        // to know they are only borrowing the pointer for read access during their execution,
        // not representing an actual leak; these we can instead count as indirect reads.
        if (edge.is_src_read()) {
            if (auto* libNode = dynamic_cast<const data_flow::LibraryNode*>(&edge.dst())) {
                auto conns = libNode->inputs();
                // NOTE(review): assumes dst_conn() is always among the node's declared inputs;
                // otherwise idx is one-past-the-end when passed to pointer_access_type — confirm.
                auto idx = std::find(conns.begin(), conns.end(), edge.dst_conn()) - conns.begin();
                auto access_type = libNode->pointer_access_type(idx);
                auto maybe_rd_only = std::get_if<data_flow::PointerReadOnly>(&access_type);
                if (maybe_rd_only && maybe_rd_only->no_ptr_escape()) {
                    indirect_reads_[container].insert(&edge);
                }
            }
        }
    }
}
380

381
// Records uses of [container] where it is the destination of a memlet.
// Dereferencing writes become removal candidates for when the area is dead.
void IndirectMemoryAccessFinder::use_as_dst_node(
    const std::string& container, const data_flow::AccessNode& node, const data_flow::Memlet& edge, const Block& block
) {
    if (target_containers_.contains(container)) {
        if (edge.is_dst_pointed_to_write()) {
            writes_to_remove_[container][&edge] = &block;
        }
        // Hack to classify Offload nodes with D2H correctly. For historic reasons they use a direct
        // output edge to the host ptr, even though they never write the pointer itself — they only
        // write the memory the pointer points to. As that edge is destructive to many optimizations
        // it is scheduled to be removed; until then we need custom handling per node.
        if (edge.is_dst_write()) {
            if (auto* offload = dynamic_cast<const offloading::DataOffloadingNode*>(&edge.src())) {
                if (offload->transfer_direction() != offloading::DataTransferDirection::NONE) {
                    writes_to_remove_[container][&edge] = &block;
                }
            }
        }
    }
}
401

402

403
bool DeadDataElimination::run_pass(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
101✔
404
    bool applied = false;
101✔
405

406
    auto& sdfg = builder.subject();
101✔
407

408
    // Check for locally allocated memory that we "own" and understand (no reference to it escapes or can potentially
409
    // alias)
410
    MemoryOwnershipAnalysis ownership_analysis(sdfg);
101✔
411
    ownership_analysis.run(analysis_manager);
101✔
412

413
    std::unordered_set<std::string> dead_containers;
101✔
414

415
    auto& fully_owned_areas = ownership_analysis.fully_owned_areas();
101✔
416
    if (!fully_owned_areas.empty()) {
101✔
417
        // We found pointered accesses, where we could prove that the pointer is unique and SSA-like, such that we may
418
        // check accesses via the pointer as well
419
        IndirectMemoryAccessFinder remaining_indirects(fully_owned_areas);
6✔
420
        remaining_indirects.dispatch(sdfg.root());
6✔
421
        for (auto& owned_area_id : fully_owned_areas) {
6✔
422
            auto& all_reads = remaining_indirects.indirect_reads();
6✔
423
            auto cont_reads_it = all_reads.find(owned_area_id);
6✔
424
            if (cont_reads_it == all_reads.end() || cont_reads_it->second.empty()) {
6✔
425
                DEBUG_PRINTLN("Removing fully owned memory " << owned_area_id << ", never used!");
4✔
426
                auto writes = remaining_indirects.writes_to_remove();
4✔
427
                // [owned_area] is never read, no reference to it escapes our control. So any write of it is useless
428
                auto writes_it = writes.find(owned_area_id);
4✔
429

430
                bool all_removed = true;
4✔
431
                if (writes_it != writes.end()) {
4✔
432
                    auto& to_remove = writes_it->second;
4✔
433
                    for (auto& [edge_to_remove, w_block] : to_remove) {
5✔
434
                        auto& write_node = dynamic_cast<const data_flow::AccessNode&>(edge_to_remove->dst());
5✔
435
                        int removed =
5✔
436
                            builder.clear_node(*const_cast<structured_control_flow::Block*>(w_block), write_node);
5✔
437
                        if (removed == 0) {
5✔
438
                            all_removed = false;
×
439
                        } else {
5✔
440
                            applied = true;
5✔
441
                        }
5✔
442
                    }
5✔
443
                }
4✔
444
                // This is the malloc. We can remove this because we understand what malloc does. Otherwise the
445
                // sideeffect flag would stop us from removing a libNode
446
                if (all_removed) {
4✔
447
                    auto& area = ownership_analysis.owned_area(owned_area_id);
4✔
448
                    area.remove_from(builder);
4✔
449
                    applied = true;
4✔
450
                    if (sdfg.is_transient(owned_area_id)) {
4✔
451
                        dead_containers.insert(owned_area_id);
4✔
452
                    }
4✔
453
                }
4✔
454
            }
4✔
455
        }
6✔
456
    }
6✔
457

458
    if (legacy_removals_) {
101✔
459
        if (applied) { // if changes were made, any cached analysis will be out of date.
98✔
460
            analysis_manager.invalidate_all();
3✔
461
        }
3✔
462

463
        // slightly expensive, because for fully_owned_areas we already looked for uses. But classified differently and
464
        // did not look at, whether the entire container can be removed
465
        auto& users = analysis_manager.get<analysis::Users>();
98✔
466
        auto& data_dependency_analysis = analysis_manager.get<analysis::DataDependencyAnalysis>();
98✔
467

468
        for (auto& name : sdfg.containers()) {
831✔
469
            if (!sdfg.is_transient(name)) {
831✔
470
                continue;
424✔
471
            }
424✔
472
            if (users.num_views(name) > 0 || users.num_moves(name) > 0) {
407✔
473
                continue;
3✔
474
            }
3✔
475
            auto num_reads = users.num_reads(name);
404✔
476
            if (!num_reads && users.num_writes(name) == 0) { // no reference of [name] anywhere
404✔
477
                dead_containers.insert(name);
12✔
478
                applied = true;
12✔
479
                continue;
12✔
480
            }
12✔
481

482
            if (sdfg.type(name).type_id() == types::TypeID::Pointer) {
392✔
483
                continue;
6✔
484
                // use analysis does not return actual reads and writes for pointers. So if [name] is a pointer,
485
                // num reads/writes, does not actually mean no reads exist and any removal is problematic
486
                // more complex cases have been removed above already
487
            }
6✔
488

489
            bool completely_unused = !num_reads; // if there are reads left, we can never remove the container, but
386✔
490
                                                 // maybe
491
            // some writes
492
            auto raws = data_dependency_analysis.definitions(name);
386✔
493
            for (auto set : raws) {
696✔
494
                bool no_reads = false;
696✔
495
                if (set.second.size() == 0) {
696✔
496
                    no_reads = true;
17✔
497
                }
17✔
498
                if (data_dependency_analysis.is_undefined_user(*set.first)) {
696✔
499
                    continue;
1✔
500
                }
1✔
501

502
                if (no_reads) {
695✔
503
                    bool could_eliminate_write = false;
17✔
504
                    auto write = set.first;
17✔
505
                    if (auto transition = dynamic_cast<structured_control_flow::Transition*>(write->element())) {
17✔
506
                        transition->assignments().erase(symbolic::symbol(name));
15✔
507
                        applied = true;
15✔
508
                        could_eliminate_write = true;
15✔
509
                    } else if (auto access_node = dynamic_cast<data_flow::AccessNode*>(write->element())) {
15✔
510
                        auto& graph = access_node->get_parent();
2✔
511
                        auto& block = dynamic_cast<structured_control_flow::Block&>(*graph.get_parent());
2✔
512

513
                        if (builder.clear_node(block, *access_node)) {
2✔
514
                            applied = true;
1✔
515
                            could_eliminate_write = true;
1✔
516
                        }
1✔
517
                    }
2✔
518

519
                    completely_unused &= could_eliminate_write;
17✔
520
                }
17✔
521
            }
695✔
522

523
            if (completely_unused) { // no reads, and all remaining writes could be removed
386✔
524
                dead_containers.insert(name);
8✔
525
            }
8✔
526
        }
386✔
527
    }
98✔
528

529
    for (auto& name : dead_containers) {
101✔
530
        builder.remove_container(name);
21✔
531
    }
21✔
532

533
    return applied;
101✔
534
};
101✔
535

536
} // namespace passes
537
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc