• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

daisytuner / docc / 24408765437

14 Apr 2026 03:47PM UTC coverage: 64.417% (-0.05%) from 64.469%
24408765437

push

github

web-flow
Add support for DDE to remove mallocs and frees as dead-code when DataOffloadingNodes operate on them (#678)

 + generic modeling for libNodes to declare that an input pointer does not actually escape through them
 + draft of generic modeling of input-pointer access ranges and access types (read-only, full overwrite etc.)
 + DDE: Offload-Transfer-specific handling to reinterpret their fake output edges instead as only indirect writes via the target pointer
 + DDE: option to disable all the legacy code that requires full UserAnalysis and DataDependencyAnalysis.
 * the new malloc/heap removal in DDE can now remove containers by itself, not relying on DataDependencyAnalysis for that
 * improved builder.clear_node, node can report how to deal with a dead edge, including calling an update method on the node to make it validate again after edge removal.
 + Used for OffloadTransfer nodes to support removing the output edge via dead-code and drop the transfer and maybe the entire node if it does nothing anymore.
 * Offloading: option to quickly disable omitting d2h for read-only data

92 of 162 new or added lines in 11 files covered. (56.79%)

4 existing lines in 3 files now uncovered.

30598 of 47500 relevant lines covered (64.42%)

582.72 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.51
/sdfg/src/passes/dataflow/dead_data_elimination.cpp
1
#include "sdfg/passes/dataflow/dead_data_elimination.h"
2

3
#include "sdfg/analysis/base_user_visitor.h"
4
#include "sdfg/analysis/data_dependency_analysis.h"
5
#include "sdfg/analysis/pointer_analyzers.h"
6
#include "sdfg/data_flow/library_nodes/stdlib/free.h"
7
#include "sdfg/data_flow/library_nodes/stdlib/malloc.h"
8
#include "sdfg/targets/offloading/data_offloading_node.h"
9
#include "sdfg/visitor/structured_sdfg_visitor.h"
10
#include "sdfg/visualizer/dot_visualizer.h"
11

12
namespace sdfg {
13
namespace passes {
14

15
DeadDataElimination::DeadDataElimination() : Pass(), legacy_removals_(true) {};
59✔
16

NEW
17
DeadDataElimination::DeadDataElimination(bool legacy_removals) : Pass(), legacy_removals_(legacy_removals) {};
×
18

19
std::string DeadDataElimination::name() { return "DeadDataElimination"; };
×
20

21
/**
 * Simple escape policy that collects all escape and overwrite events
 * into a map of container -> {element -> type} entries.
 */
class BlockerListPolicy {
public:
    // Why a container could not be proven fully owned at a given element.
    enum class BlockerType { Escape, Overwrite };

    // container name -> (blocking element -> kind of blocker)
    using BlockerMap = std::unordered_map<std::string, std::unordered_map<const Element*, BlockerType>>;

protected:
    BlockerMap blockers_;

public:
    // Records [user] as an escape point of [container]. [node] is currently unused.
    // Note: emplace keeps the FIRST recorded type if the same element reports twice,
    // so an element seen as Escape stays Escape even if later reported as Overwrite.
    void on_escape(const std::string& container, const ControlFlowNode* node, const Element* user) {
        blockers_[container].emplace(user, BlockerType::Escape);
    }

    // Records [user] as an overwrite of [container]. [node] is currently unused.
    void on_overwrite(const std::string& container, const ControlFlowNode* node, const Element* user) {
        blockers_[container].emplace(user, BlockerType::Overwrite);
    }
};
43

44
/**
 * Finds memory areas (heap for now) that are wholly owned by the surrounding function. Owned memory can be removed if
 * it's no longer used; writes to it can be elided if the data is never read. This is not true for memory writes in
 * general, as you must prove no reference to that data ever escapes our control.
 */
class MemoryOwnershipAnalysis : public analysis::BaseUserVisitor,
                                analysis::PointerEscapeAnalyzer<BlockerListPolicy>,
                                analysis::PointerOverwriteAnalyzer<BlockerListPolicy>,
                                BlockerListPolicy {
    // A matched free() of an owned area: the block it lives in plus the
    // pointer-input edge and the (fake, NULL-writing) output edge of the FreeNode.
    struct FreeCluster {
        const Block* block;
        const data_flow::Memlet* in;
        const data_flow::Memlet* out;

        FreeCluster(const Block* b, const data_flow::Memlet* i, const data_flow::Memlet* o) : block(b), in(i), out(o) {}
    };

    // One heap allocation we may own: the malloc output edge (producer), its block,
    // the allocation size, and all free clusters that release it.
    struct OwnedArea {
        data_flow::Memlet* producer;
        structured_control_flow::Block* producer_block;
        symbolic::Expression allocation_size;
        // Set when the same container is assigned by more than one malloc; such
        // areas are no longer SSA-like and are excluded from removal.
        bool non_ssa = false;
        std::vector<FreeCluster> free_clusters;

        OwnedArea(data_flow::Memlet* p, structured_control_flow::Block* pb, symbolic::Expression as, bool ns)
            : producer(p), producer_block(pb), allocation_size(std::move(as)), non_ssa(ns) {}

        // Removes the producing malloc and all free clusters via the builder.
        void remove_from(builder::StructuredSDFGBuilder& builder) const;
    };

private:
    StructuredSDFG& sdfg_;
    // memory that is allocated by us and therefore 'owned' until it escapes
    std::unordered_map<std::string, OwnedArea> originally_owned_data_;
    std::unordered_set<std::string> fully_owned_; // never escaped

public:
    MemoryOwnershipAnalysis(StructuredSDFG& sdfg);

    // Dispatches over the SDFG and then classifies each discovered area as
    // fully owned or blocked (escaped/overwritten without excuse).
    void run(analysis::AnalysisManager& manager);

    // Collects malloc/free clusters per block; see definition below.
    bool visit(sdfg::structured_control_flow::Block& node) override;

    // The use_as_* overrides below fan every user event out to BOTH pointer
    // analyzers, which report back through the BlockerListPolicy callbacks.
    void use_as_symbol_write(
        const symbolic::Symbol& container, const ControlFlowNode* node, const Element* user, SymbolWriteLocation loc
    ) override {
        PointerEscapeAnalyzer::use_as_symbol_write(container, node, user, loc);
        PointerOverwriteAnalyzer::use_as_symbol_write(container, node, user, loc);
    }
    void use_as_symbol_read(
        const std::string& container,
        const ControlFlowNode* node,
        const Element* user,
        SymbolReadLocation loc,
        int loc_index,
        symbolic::Expression expr
    ) override {
        PointerEscapeAnalyzer::use_as_symbol_read(container, node, user, loc, loc_index, std::move(expr));
        // NOTE(review): expr is read after std::move above — this relies on the first
        // move target taking the expression by value/copy; confirm against the analyzer API.
        PointerOverwriteAnalyzer::use_as_symbol_read(container, node, user, loc, loc_index, std::move(expr));
    }
    void use_as_src_node(
        const std::string& container,
        const data_flow::AccessNode& node,
        const data_flow::Memlet& edge,
        const Block& block
    ) override {
        PointerEscapeAnalyzer::use_as_src_node(container, node, edge, block);
        PointerOverwriteAnalyzer::use_as_src_node(container, node, edge, block);
    }
    void use_as_dst_node(
        const std::string& container,
        const data_flow::AccessNode& node,
        const data_flow::Memlet& edge,
        const Block& block
    ) override {
        PointerEscapeAnalyzer::use_as_dst_node(container, node, edge, block);
        PointerOverwriteAnalyzer::use_as_dst_node(container, node, edge, block);
    }
    void use_as_return_src(const std::string& container, const Return& ret) override {
        PointerEscapeAnalyzer::use_as_return_src(container, ret);
        PointerOverwriteAnalyzer::use_as_return_src(container, ret);
    }

    // Containers whose memory never escapes and is never overwritten without excuse.
    const std::unordered_set<std::string>& fully_owned_areas() const { return fully_owned_; }

    // Precondition: [container] was discovered as owned (throws std::out_of_range otherwise).
    const OwnedArea& owned_area(const std::string& container) const { return originally_owned_data_.at(container); }

private:
    static bool excusedEscape(const Element* element, const OwnedArea& area);
    static bool excusedOverwrite(const Element* element, const OwnedArea& area);
};
135

136
/**
 * Removes the producing malloc node and every matched free cluster of this
 * owned area from the SDFG via the builder.
 *
 * Fix: the previous `auto& malloc_write = this->producer->dst();` local was only
 * referenced by commented-out code and triggered an unused-variable warning;
 * it now lives inside the comment that needs it.
 */
void MemoryOwnershipAnalysis::OwnedArea::remove_from(builder::StructuredSDFGBuilder& builder) const {
    auto& malloc_node = this->producer->src();
    builder.clear_code_node_legacy(*this->producer_block, dynamic_cast<const data_flow::CodeNode&>(malloc_node));
    // TODO: switch to the edge-aware removal once stable:
    // auto& malloc_write = this->producer->dst();
    // builder.clear_node(
    //     *this->producer_block, dynamic_cast<data_flow::AccessNode&>(malloc_write), {&malloc_write, &malloc_node}
    // );

    for (auto& free_cluster : this->free_clusters) {
        auto& memlet = *free_cluster.out;
        // const_cast: the cluster stores const blocks, but the builder mutates them.
        builder.clear_code_node_legacy(
            *const_cast<Block*>(free_cluster.block), dynamic_cast<const data_flow::CodeNode&>(memlet.src())
        );
        // builder.clear_node(*const_cast<Block*>(free_cluster.block), memlet.dst(), {&memlet.dst(), &memlet.src()});
    }
}
152

153
// Both pointer analyzers receive *this as their policy (BlockerListPolicy base).
// Fix: the mem-initializer list previously named the member sdfg_ before the base
// classes; bases are always initialized first regardless, so the old order was
// misleading and triggered -Wreorder. List in actual initialization order.
MemoryOwnershipAnalysis::MemoryOwnershipAnalysis(StructuredSDFG& sdfg)
    : PointerEscapeAnalyzer(sdfg, *this), PointerOverwriteAnalyzer(sdfg, *this), sdfg_(sdfg) {}
155

156
bool MemoryOwnershipAnalysis::excusedEscape(const Element* element, const OwnedArea& area) {
7✔
157
    // An escape is excused if it matches the input edge of one of the free_clusters.
158
    // Reading the pointer to pass it to free() is not a real escape.
159
    for (const auto& cluster : area.free_clusters) {
7✔
160
        if (element == cluster.in) {
1✔
161
            return true;
1✔
162
        }
1✔
163
    }
1✔
164
    return false;
6✔
165
}
7✔
166

167
/**
 * Checks whether a recorded overwrite of an owned pointer is harmless, i.e.
 * does not invalidate our ownership/SSA assumption about the area.
 * Only memlets can be excused; any other element is a real blocker.
 */
bool MemoryOwnershipAnalysis::excusedOverwrite(const Element* element, const OwnedArea& area) {
    auto* memlet = dynamic_cast<const data_flow::Memlet*>(element);

    if (!memlet) {
        return false;
    }

    // The producer (malloc output edge) is an excused overwrite — it's the initial assignment.
    if (element == area.producer) {
        return true;
    }
    // DataOffloadNodes currently have a fake-output edge instead of a pointer input.
    // But they can only write to memory, never generate/overwrite the pointer.
    if (auto* offload = dynamic_cast<const offloading::DataOffloadingNode*>(&memlet->src())) {
        if (offload->transfer_direction() != offloading::DataTransferDirection::NONE) {
            return true;
        }
    }

    // The output edge of a free cluster is an excused overwrite — free sets the pointer
    // to NULL (a fake overwrite that doesn't represent a meaningful reassignment).
    for (const auto& cluster : area.free_clusters) {
        if (element == cluster.out) {
            return true;
        }
    }
    return false;
}
195

196

197
// Walks the SDFG once (collecting malloc/free clusters and blocker events), then
// classifies each candidate area: it is "fully owned" iff it is SSA-like, has a
// known allocation size, and every recorded blocker is excused.
void MemoryOwnershipAnalysis::run(analysis::AnalysisManager& manager) {
    dispatch(sdfg_.root());

    for (auto& [name, area] : originally_owned_data_) {
        // Areas with conflicting producers or unknown size can never be removed.
        if (area.non_ssa || area.allocation_size == SymEngine::null) {
            continue;
        }

        bool killed = false;
        auto blocker_it = blockers_.find(name);
        if (blocker_it != blockers_.end()) {
            for (auto& [element, type] : blocker_it->second) {
                const bool excused = (type == BlockerType::Escape) ? excusedEscape(element, area)
                                                                   : excusedOverwrite(element, area);
                if (!excused) {
                    killed = true;
                    break;
                }
            }
        }

        // No blockers at all, or every blocker excused: the area is fully owned.
        if (!killed) {
            fully_owned_.insert(name);
        }
    }
}
227

228

229
/**
 * Per-block collection pass: records every malloc output as a candidate OwnedArea
 * and attaches matching free() calls as FreeClusters. A container that is assigned
 * by more than one malloc is downgraded to non-SSA and excluded from removal.
 *
 * Fix: the results of the dynamic_casts to MallocNode/FreeNode were dereferenced
 * without a null check; a mismatching node type would have crashed. Guarded now.
 *
 * Returns whatever the base visitor returns (continues the traversal).
 */
bool MemoryOwnershipAnalysis::visit(sdfg::structured_control_flow::Block& node) {
    auto& dflow = node.dataflow();
    for (auto& library_node : dflow.library_nodes()) {
        if (library_node->code() == stdlib::LibraryNodeType_Malloc) {
            auto* malloc_node = dynamic_cast<const stdlib::MallocNode*>(library_node);
            if (!malloc_node) {
                continue; // code() claims malloc but the node is not a MallocNode — skip defensively
            }
            auto& alloc_size = malloc_node->size();
            auto output_conn = malloc_node->output(0);
            // Only edges leaving through the malloc's result connector carry the pointer.
            auto oedges = dflow.out_edges(*malloc_node) |
                          std::views::filter([&](const auto& e) { return e.src_conn() == output_conn; });
            for (auto& oedge : oedges) {
                auto* access_node = dynamic_cast<data_flow::AccessNode*>(&oedge.dst());
                if (access_node && oedge.is_dst_write()) {
                    auto container = access_node->data();
                    auto it = originally_owned_data_.find(container);
                    if (it != originally_owned_data_.end()) {
                        // Second malloc into the same container: ownership is ambiguous.
                        auto& area = it->second;
                        area.non_ssa = true;
                        area.allocation_size = SymEngine::null;
                        area.producer_block = nullptr;
                        area.producer = nullptr;
                        // DEBUG_PRINTLN("Conflicting ownership of " << container);
                        continue;
                    }
                    originally_owned_data_.emplace(
                        std::piecewise_construct,
                        std::forward_as_tuple(container),
                        std::forward_as_tuple(&oedge, &node, alloc_size, false)
                    );
                }
            }
        } else if (library_node->code() == stdlib::LibraryNodeType_Free) {
            auto* free_node = dynamic_cast<const stdlib::FreeNode*>(library_node);
            if (!free_node) {
                continue; // see malloc guard above
            }
            auto input = dflow.in_edge_for_connector(*free_node, free_node->input(0));
            auto outputs = dflow.out_edges_for_connector(*free_node, free_node->output(0));
            if (input && outputs.size() == 1) {
                auto* in_access = dynamic_cast<const data_flow::AccessNode*>(&input->src());
                auto* out_access = dynamic_cast<const data_flow::AccessNode*>(&outputs.at(0)->dst());

                // A free cluster requires the same pointer container on both sides.
                if (in_access && out_access && in_access->data() == out_access->data()) {
                    auto& container = in_access->data();
                    if (sdfg_.type(container).type_id() == types::TypeID::Pointer) {
                        auto area_it = originally_owned_data_.find(container);
                        if (area_it != originally_owned_data_.end()) { // we scan in execution order. Malloc needs to
                                                                       // have been found before
                            auto& area = area_it->second;
                            area.free_clusters.emplace_back(&node, input, outputs[0]);
                        }
                    }
                }
            }
        }
    }

    return BaseUserVisitor::visit(node);
}
284

285
/**
 * Does not care about other types of accesses.
 * Presumes that the data cannot alias and there is only one SSA-like instance of backing data,
 * so that indirect reads and writes can be matched up with each other.
 * Any read of the pointer or getting of an address is considered an escape for which aliasing cannot be excluded,
 * in which case you must not rely on this analysis.
 */
class IndirectMemoryAccessFinder : public analysis::BaseUserVisitor { // TODO update to use the PointerUsedAnalyzer and
                                                                      // a policy that filters by the target containers
private:
    // container -> memlets that read through the pointer (indirect reads)
    std::unordered_map<std::string, std::unordered_set<const data_flow::Memlet*>> indirect_reads_;
    // container -> (indirect-write memlet -> block it lives in), candidates for removal
    std::unordered_map<std::string, std::unordered_map<const data_flow::Memlet*, const Block*>> writes_to_remove_;
    // Only accesses to these containers are tracked; caller must keep the set alive.
    const std::unordered_set<std::string>& target_containers_;

public:
    IndirectMemoryAccessFinder(const std::unordered_set<std::string>& target_containers);

    const std::unordered_map<std::string, std::unordered_set<const data_flow::Memlet*>>& indirect_reads() {
        return indirect_reads_;
    }
    const std::unordered_map<std::string, std::unordered_map<const data_flow::Memlet*, const Block*>>& writes_to_remove() {
        return writes_to_remove_;
    }

    // Symbol-level and return uses are intentionally ignored: only memlet
    // (dataflow edge) accesses are relevant for indirect read/write matching.
    void use_as_symbol_read(
        const std::string& container,
        const ControlFlowNode* node,
        const Element* user,
        SymbolReadLocation loc,
        int loc_index,
        symbolic::Expression expr
    ) override {}
    void use_as_symbol_write(
        const symbolic::Symbol& container, const ControlFlowNode* node, const Element* user, SymbolWriteLocation loc
    ) override {}
    void use_as_src_node(
        const std::string& container,
        const data_flow::AccessNode& node,
        const data_flow::Memlet& edge,
        const Block& block
    ) override;
    void use_as_dst_node(
        const std::string& container,
        const data_flow::AccessNode& node,
        const data_flow::Memlet& edge,
        const Block& block
    ) override;
    void use_as_return_src(const std::string& container, const Return& ret) override {}
};
334

335
// Stores a reference to the caller-owned container set; it must outlive this finder.
IndirectMemoryAccessFinder::IndirectMemoryAccessFinder(const std::unordered_set<std::string>& target_containers)
    : target_containers_(target_containers) {}
337

338
void IndirectMemoryAccessFinder::use_as_src_node(
339
    const std::string& container, const data_flow::AccessNode& node, const data_flow::Memlet& edge, const Block& block
340
) {
10✔
341
    if (target_containers_.contains(container)) {
10✔
342
        if (edge.is_src_pointed_to_read()) {
2✔
343
            indirect_reads_[container].insert(&edge);
1✔
344
        }
1✔
345
        // Library nodes may get a pointer as input. But some of them we know enough about,
346
        // to know they are only borrowing the pointer for read access during their execution, not representing an
347
        // actual leak these we can instead cound as indirect readse
348
        if (edge.is_src_read()) {
2✔
349
            if (auto* libNode = dynamic_cast<const data_flow::LibraryNode*>(&edge.dst())) {
2✔
350
                auto conns = libNode->inputs();
1✔
351
                auto idx = std::find(conns.begin(), conns.end(), edge.dst_conn()) - conns.begin();
1✔
352
                auto access_type = libNode->pointer_access_type(idx);
1✔
353
                auto maybe_rd_only = std::get_if<data_flow::PointerReadOnly>(&access_type);
1✔
354
                if (maybe_rd_only && maybe_rd_only->no_ptr_escape()) {
1✔
NEW
355
                    indirect_reads_[container].insert(&edge);
×
NEW
356
                }
×
357
            }
1✔
358
        }
2✔
359
    }
2✔
360
}
10✔
361

362
/**
 * Classifies a use of [container] as the destination of a memlet. Writes through
 * the pointer (and the fake direct writes produced by offload nodes) are recorded
 * as removal candidates — they are dead if the area is never read.
 */
void IndirectMemoryAccessFinder::use_as_dst_node(
    const std::string& container, const data_flow::AccessNode& node, const data_flow::Memlet& edge, const Block& block
) {
    if (target_containers_.contains(container)) {
        if (edge.is_dst_pointed_to_write()) {
            writes_to_remove_[container][&edge] = &block;
        }
        // Hack to classify Offload nodes with D2H correctly. For historic reasons they use
        // a direct output edge to the host pointer, even though they never write the pointer
        // itself — only the memory it points to. As that edge is destructive to many
        // optimizations, it is scheduled to be replaced with custom handling per node.
        if (edge.is_dst_write()) {
            if (auto* offload = dynamic_cast<const offloading::DataOffloadingNode*>(&edge.src())) {
                if (offload->transfer_direction() != offloading::DataTransferDirection::NONE) {
                    writes_to_remove_[container][&edge] = &block;
                }
            }
        }
    }
}
382

383

384
bool DeadDataElimination::run_pass(builder::StructuredSDFGBuilder& builder, analysis::AnalysisManager& analysis_manager) {
98✔
385
    bool applied = false;
98✔
386

387
    auto& sdfg = builder.subject();
98✔
388

389
    // Check for locally allocated memory that we "own" and understand (no reference to it escapes or can potentially
390
    // alias)
391
    MemoryOwnershipAnalysis ownership_analysis(sdfg);
98✔
392
    ownership_analysis.run(analysis_manager);
98✔
393

394
    std::unordered_set<std::string> dead_containers;
98✔
395

396
    auto& fully_owned_areas = ownership_analysis.fully_owned_areas();
98✔
397
    if (!fully_owned_areas.empty()) {
98✔
398
        // We found pointered accesses, where we could prove that the pointer is unique and SSA-like, such that we may
399
        // check accesses via the pointer as well
400
        IndirectMemoryAccessFinder remaining_indirects(fully_owned_areas);
4✔
401
        remaining_indirects.dispatch(sdfg.root());
4✔
402
        for (auto& owned_area_id : fully_owned_areas) {
4✔
403
            auto& all_reads = remaining_indirects.indirect_reads();
4✔
404
            auto cont_reads_it = all_reads.find(owned_area_id);
4✔
405
            if (cont_reads_it == all_reads.end() || cont_reads_it->second.empty()) {
4✔
406
                DEBUG_PRINTLN("Removing fully owned memory " << owned_area_id << ", never used!");
3✔
407
                auto writes = remaining_indirects.writes_to_remove();
3✔
408
                // [owned_area] is never read, no reference to it escapes our control. So any write of it is useless
409
                auto writes_it = writes.find(owned_area_id);
3✔
410

411
                bool all_removed = true;
3✔
412
                if (writes_it != writes.end()) {
3✔
413
                    auto& to_remove = writes_it->second;
3✔
414
                    for (auto& [edge_to_remove, w_block] : to_remove) {
3✔
415
                        auto& write_node = dynamic_cast<const data_flow::AccessNode&>(edge_to_remove->dst());
3✔
416
                        int removed =
3✔
417
                            builder.clear_node(*const_cast<structured_control_flow::Block*>(w_block), write_node);
3✔
418
                        if (removed == 0) {
3✔
419
                            all_removed = false;
×
420
                        } else {
3✔
421
                            applied = true;
3✔
422
                        }
3✔
423
                    }
3✔
424
                }
3✔
425
                // This is the malloc. We can remove this because we understand what malloc does. Otherwise the
426
                // sideeffect flag would stop us from removing a libNode
427
                if (all_removed) {
3✔
428
                    auto& area = ownership_analysis.owned_area(owned_area_id);
3✔
429
                    area.remove_from(builder);
3✔
430
                    applied = true;
3✔
431
                    dead_containers.insert(owned_area_id);
3✔
432
                }
3✔
433
            }
3✔
434
        }
4✔
435
    }
4✔
436

437
    if (legacy_removals_) {
98✔
438
        if (applied) { // if changes were made, any cached analysis will be out of date.
98✔
439
            analysis_manager.invalidate_all();
3✔
440
        }
3✔
441

442
        // slightly expensive, because for fully_owned_areas we already looked for uses. But classified differently and
443
        // did not look at, whether the entire container can be removed
444
        auto& users = analysis_manager.get<analysis::Users>();
98✔
445
        auto& data_dependency_analysis = analysis_manager.get<analysis::DataDependencyAnalysis>();
98✔
446

447
        for (auto& name : sdfg.containers()) {
831✔
448
            if (!sdfg.is_transient(name)) {
831✔
449
                continue;
424✔
450
            }
424✔
451
            if (users.num_views(name) > 0 || users.num_moves(name) > 0) {
407✔
452
                continue;
3✔
453
            }
3✔
454
            auto num_reads = users.num_reads(name);
404✔
455
            if (!num_reads && users.num_writes(name) == 0) { // no reference of [name] anywhere
404✔
456
                dead_containers.insert(name);
12✔
457
                applied = true;
12✔
458
                continue;
12✔
459
            }
12✔
460

461
            if (sdfg.type(name).type_id() == types::TypeID::Pointer) {
392✔
462
                continue;
6✔
463
                // use analysis does not return actual reads and writes for pointers. So if [name] is a pointer,
464
                // num reads/writes, does not actually mean no reads exist and any removal is problematic
465
                // more complex cases have been removed above already
466
            }
6✔
467

468
            bool completely_unused = !num_reads; // if there are reads left, we can never remove the container, but
386✔
469
                                                 // maybe
470
            // some writes
471
            auto raws = data_dependency_analysis.definitions(name);
386✔
472
            for (auto set : raws) {
696✔
473
                bool no_reads = false;
696✔
474
                if (set.second.size() == 0) {
696✔
475
                    no_reads = true;
17✔
476
                }
17✔
477
                if (data_dependency_analysis.is_undefined_user(*set.first)) {
696✔
478
                    continue;
1✔
479
                }
1✔
480

481
                if (no_reads) {
695✔
482
                    bool could_eliminate_write = false;
17✔
483
                    auto write = set.first;
17✔
484
                    if (auto transition = dynamic_cast<structured_control_flow::Transition*>(write->element())) {
17✔
485
                        transition->assignments().erase(symbolic::symbol(name));
15✔
486
                        applied = true;
15✔
487
                        could_eliminate_write = true;
15✔
488
                    } else if (auto access_node = dynamic_cast<data_flow::AccessNode*>(write->element())) {
15✔
489
                        auto& graph = access_node->get_parent();
2✔
490
                        auto& block = dynamic_cast<structured_control_flow::Block&>(*graph.get_parent());
2✔
491

492
                        if (builder.clear_node(block, *access_node)) {
2✔
493
                            applied = true;
1✔
494
                            could_eliminate_write = true;
1✔
495
                        }
1✔
496
                    }
2✔
497

498
                    completely_unused &= could_eliminate_write;
17✔
499
                }
17✔
500
            }
695✔
501

502
            if (completely_unused) { // no reads, and all remaining writes could be removed
386✔
503
                dead_containers.insert(name);
8✔
504
            }
8✔
505
        }
386✔
506
    }
98✔
507

508
    for (auto& name : dead_containers) {
98✔
509
        builder.remove_container(name);
20✔
510
    }
20✔
511

512
    return applied;
98✔
513
};
98✔
514

515
} // namespace passes
516
} // namespace sdfg
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc