Alan-Jowett / ebpf-verifier, build 22160684311 (push, github)
18 Feb 2026 09:20PM UTC. Coverage: 88.256% (+1.1%) from 87.142%

Commit: lint (elazarg)
Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

78 of 82 new or added lines in 6 files covered (95.12%).
402 existing lines in 18 files now uncovered.
10731 of 12159 relevant lines covered (88.26%).
837642.51 hits per line.
Source File: /src/result.cpp (76.51% covered)

// Copyright (c) Prevail Verifier contributors.
// SPDX-License-Identifier: MIT
#include <algorithm>
#include <map>
#include <ranges>
#include <regex>
#include <sstream>

#include "crab/ebpf_domain.hpp"
#include "ir/program.hpp"
#include "result.hpp"
#include "spec/ebpf_base.h"
#include "spec/vm_isa.hpp"

namespace prevail {

// Stream-local storage index for invariant filter
static int invariant_filter_index() {
    static const int index = std::ios_base::xalloc();
    return index;
}

const RelevantState* get_invariant_filter(std::ostream& os) {
    return static_cast<const RelevantState*>(os.pword(invariant_filter_index()));
}

std::ostream& operator<<(std::ostream& os, const invariant_filter& filter) {
    os.pword(invariant_filter_index()) = const_cast<void*>(static_cast<const void*>(filter.state));
    return os;
}
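// Illustrative sketch (not part of this file): minimal usage of the stream-local
// filter above, assuming invariant_filter is an aggregate holding a
// `const RelevantState* state` (as suggested by operator<<) and that the actual
// invariant printer (here a hypothetical print_invariant) consults it:
//
//   RelevantState relevant;                 // registers / stack offsets of interest
//   std::ostringstream ss;
//   ss << invariant_filter{&relevant};      // stash the filter via xalloc()/pword()
//   print_invariant(ss, some_invariant);    // hypothetical printer
//   // ...inside such a printer:
//   if (const RelevantState* f = get_invariant_filter(ss)) {
//       // emit only constraints c for which f->is_relevant_constraint(c) holds
//   }
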

bool RelevantState::is_relevant_constraint(const std::string& constraint) const {
    // Extract the register or stack reference from the constraint.
    // Constraints look like: "r1.type=number", "r2.svalue=[0, 100]", "s[4088...4095].type=ctx"
    // Relational constraints: "r1.svalue-r8.svalue<=-100", "r0.svalue=r3.svalue+2"
    // Also handle: "packet_size=54", "meta_offset=[-4098, 0]"

    // Track whether we matched any known pattern.  When at least one pattern
    // fires but nothing is relevant we return false; the conservative
    // `return true` at the end only applies to truly unparseable constraints.
    bool parsed_any_pattern = false;

    // Check for ANY register mentioned in the constraint (not just at start).
    // This handles relational constraints like "r1.svalue-r8.svalue<=100".
    static const std::regex reg_pattern(R"(r(\d+)\.)");
    std::sregex_iterator it(constraint.begin(), constraint.end(), reg_pattern);
    std::sregex_iterator end;

    while (it != end) {
        parsed_any_pattern = true;
        const uint8_t reg_num = static_cast<uint8_t>(std::stoi((*it)[1].str()));
        if (registers.contains(Reg{reg_num})) {
            return true; // At least one relevant register is mentioned
        }
        ++it;
    }

    // Precompute absolute stack offsets once to avoid repeated conversion in loops
    std::vector<int64_t> abs_stack_offsets;
    abs_stack_offsets.reserve(stack_offsets.size());
    for (const auto& rel_offset : stack_offsets) {
        abs_stack_offsets.push_back(EBPF_TOTAL_STACK_SIZE + rel_offset);
    }

    // Check for stack range pattern: s[start...end]
    static const std::regex stack_range_pattern(R"(s\[(\d+)\.\.\.(\d+)\])");
    std::sregex_iterator stack_it(constraint.begin(), constraint.end(), stack_range_pattern);
    while (stack_it != end) {
        parsed_any_pattern = true;
        const int64_t abs_start = std::stoll((*stack_it)[1].str());
        const int64_t abs_end = std::stoll((*stack_it)[2].str());
        for (const auto& abs_offset : abs_stack_offsets) {
            if (abs_offset >= abs_start && abs_offset <= abs_end) {
                return true; // Overlaps within the constraint range
            }
        }
        ++stack_it;
    }

    // Check for single-offset stack pattern: s[offset]
    static const std::regex stack_single_pattern(R"(s\[(\d+)\]\.)");
    std::sregex_iterator single_it(constraint.begin(), constraint.end(), stack_single_pattern);
    while (single_it != end) {
        parsed_any_pattern = true;
        const int64_t abs_pos = std::stoll((*single_it)[1].str());
        for (const auto& abs_offset : abs_stack_offsets) {
            if (abs_offset == abs_pos) {
                return true;
            }
        }
        ++single_it;
    }

    // Global constraints (packet_size, meta_offset) - always show for context
    if (constraint.starts_with("packet_size") || constraint.starts_with("meta_offset")) {
        return true;
    }

    // A known pattern was parsed but nothing matched — the constraint is irrelevant.
    if (parsed_any_pattern) {
        return false;
    }

    // If we can't parse it at all, show it (conservative)
    return true;
}
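// Illustrative sketch (not part of this file): assuming RelevantState can be
// default-constructed and its `registers` set populated directly, the filter
// above behaves roughly as follows:
//
//   RelevantState rs;
//   rs.registers.insert(Reg{1});
//   rs.is_relevant_constraint("r1.type=number");            // true:  r1 is tracked
//   rs.is_relevant_constraint("r1.svalue-r8.svalue<=-100"); // true:  mentions r1
//   rs.is_relevant_constraint("r2.svalue=[0, 100]");        // false: only r2, which is untracked
//   rs.is_relevant_constraint("packet_size=54");            // true:  globals are always shown
//   rs.is_relevant_constraint("some_unknown_form");         // true:  unparseable, kept conservatively
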

bool AnalysisResult::is_valid_after(const Label& label, const StringInvariant& state) const {
    const EbpfDomain abstract_state =
        EbpfDomain::from_constraints(state.value(), thread_local_options.setup_constraints);
    return abstract_state <= invariants.at(label).post;
}

ObservationCheckResult AnalysisResult::check_observation_at_label(const Label& label, const InvariantPoint point,
                                                                  const StringInvariant& observation,
                                                                  const ObservationCheckMode mode) const {
    const auto it = invariants.find(label);
    if (it == invariants.end()) {
        return {.ok = false, .message = "No invariant available for label " + to_string(label)};
    }
    const auto& inv_pair = it->second;
    const EbpfDomain& abstract_state = (point == InvariantPoint::pre) ? inv_pair.pre : inv_pair.post;

    const EbpfDomain observed_state =
        observation.is_bottom()
            ? EbpfDomain::bottom()
            : EbpfDomain::from_constraints(observation.value(), thread_local_options.setup_constraints);

    if (observed_state.is_bottom()) {
        return {.ok = false, .message = "Observation constraints are unsatisfiable (domain is bottom)"};
    }

    if (abstract_state.is_bottom()) {
        return {.ok = false, .message = "Invariant at label is bottom (unreachable)"};
    }

    switch (mode) {
    case ObservationCheckMode::entailed:
        if (observed_state <= abstract_state) {
            return {.ok = true, .message = ""};
        }
        return {.ok = false, .message = "Invariant does not entail the observation (C ⊑ A is false)"};

    case ObservationCheckMode::consistent:
        // Default: consistency / satisfiability.
        if ((abstract_state & observed_state).is_bottom()) {
            return {.ok = false, .message = "Observation contradicts invariant (meet is bottom)"};
        }
        return {.ok = true, .message = ""};
    }

    return {.ok = false, .message = "Unsupported observation check mode"};
}
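// Illustrative sketch (not part of this file): how the two check modes above differ
// for an invariant A containing r0.svalue=[0, 10]:
//
//   entailed:   observation {r0.svalue=[0, 5]}   -> ok    (the observation is entailed, C ⊑ A)
//               observation {r0.svalue=[0, 20]}  -> fails (not entailed by A)
//   consistent: observation {r0.svalue=[0, 20]}  -> ok    (meet is r0.svalue=[0, 10], non-bottom)
//               observation {r0.svalue=[20, 30]} -> fails (meet is bottom, a contradiction)
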

bool AnalysisResult::is_consistent_before(const Label& label, const StringInvariant& observation) const {
    return check_observation_at_label(label, InvariantPoint::pre, observation, ObservationCheckMode::consistent).ok;
}

bool AnalysisResult::is_consistent_after(const Label& label, const StringInvariant& observation) const {
    return check_observation_at_label(label, InvariantPoint::post, observation, ObservationCheckMode::consistent).ok;
}

StringInvariant AnalysisResult::invariant_at(const Label& label) const { return invariants.at(label).post.to_set(); }

std::optional<VerificationError> AnalysisResult::find_first_error() const {
    for (const auto& inv_pair : invariants | std::views::values) {
        if (inv_pair.pre.is_bottom()) {
            continue;
        }
        if (inv_pair.error) {
            return inv_pair.error;
        }
    }
    return {};
}

std::map<Label, std::vector<std::string>> AnalysisResult::find_unreachable(const Program& prog) const {
    std::map<Label, std::vector<std::string>> unreachable;
    for (const auto& [label, inv_pair] : invariants) {
        if (inv_pair.pre.is_bottom()) {
            continue;
        }
        if (const auto passume = std::get_if<Assume>(&prog.instruction_at(label))) {
            if (inv_pair.post.is_bottom() && !inv_pair.error) {
                const auto s = to_string(*passume);
                unreachable[label].emplace_back(to_string(label) + ": Code becomes unreachable (" + s + ")");
            }
        }
    }
    return unreachable;
}

/// Extract the registers and stack offsets read/written by an instruction.
/// Used to populate InstructionDeps during forward analysis.
InstructionDeps extract_instruction_deps(const Instruction& ins, const EbpfDomain& pre_state) {
    InstructionDeps deps;

    std::visit(
        [&](const auto& v) {
            using T = std::decay_t<decltype(v)>;

            if constexpr (std::is_same_v<T, Bin>) {
                // dst = dst op src (or dst = src for MOV/MOVSX)
                deps.regs_written.insert(v.dst);
                if (v.op != Bin::Op::MOV && v.op != Bin::Op::MOVSX8 && v.op != Bin::Op::MOVSX16 &&
                    v.op != Bin::Op::MOVSX32) {
                    // Non-move ops also read dst
                    deps.regs_read.insert(v.dst);
                }
                if (const auto* reg = std::get_if<Reg>(&v.v)) {
                    deps.regs_read.insert(*reg);
                }
            } else if constexpr (std::is_same_v<T, Un>) {
                // dst = op(dst)
                deps.regs_read.insert(v.dst);
                deps.regs_written.insert(v.dst);
            } else if constexpr (std::is_same_v<T, LoadMapFd> || std::is_same_v<T, LoadMapAddress>) {
                deps.regs_written.insert(v.dst);
            } else if constexpr (std::is_same_v<T, Mem>) {
                deps.regs_read.insert(v.access.basereg);
                if (v.is_load) {
                    // Load: value = *(basereg + offset)
                    if (const auto* dst_reg = std::get_if<Reg>(&v.value)) {
                        deps.regs_written.insert(*dst_reg);
                    }
                    // Track stack read if base is a stack pointer (R10 or derived)
                    if (v.access.basereg.v == R10_STACK_POINTER) {
                        deps.stack_read.insert(v.access.offset);
                    } else if (const auto stack_off = pre_state.get_stack_offset(v.access.basereg)) {
                        // basereg is a stack pointer with known offset from R10
                        // Compute the actual stack slot: stack_off is absolute (e.g., 4096),
                        // convert to relative from R10: relative = offset + (stack_off - EBPF_TOTAL_STACK_SIZE)
                        deps.stack_read.insert(v.access.offset + (*stack_off - EBPF_TOTAL_STACK_SIZE));
                    }
                } else {
                    // Store: *(basereg + offset) = value
                    if (const auto* src_reg = std::get_if<Reg>(&v.value)) {
                        deps.regs_read.insert(*src_reg);
                    }
                    // Track stack write if base is a stack pointer (R10 or derived)
                    if (v.access.basereg.v == R10_STACK_POINTER) {
                        deps.stack_written.insert(v.access.offset);
                    } else if (const auto stack_off = pre_state.get_stack_offset(v.access.basereg)) {
                        deps.stack_written.insert(v.access.offset + (*stack_off - EBPF_TOTAL_STACK_SIZE));
                    }
                }
            } else if constexpr (std::is_same_v<T, Atomic>) {
                deps.regs_read.insert(v.access.basereg);
                deps.regs_read.insert(v.valreg);
                if (v.fetch) {
                    deps.regs_written.insert(v.valreg);
                }
                // Track stack read/write if base is a stack pointer (R10 or derived)
                if (v.access.basereg.v == R10_STACK_POINTER) {
                    deps.stack_read.insert(v.access.offset);
                    deps.stack_written.insert(v.access.offset);
                } else if (const auto stack_off = pre_state.get_stack_offset(v.access.basereg)) {
                    const auto adjusted_off = v.access.offset + (*stack_off - EBPF_TOTAL_STACK_SIZE);
                    deps.stack_read.insert(adjusted_off);
                    deps.stack_written.insert(adjusted_off);
                }
            } else if constexpr (std::is_same_v<T, Call>) {
                // Calls read R1-R5 as arguments, write R0 as return value,
                // and clobber R1-R5 (caller-saved registers are killed).
                // Separate clobbers from writes so backward slicing can:
                // - Stop propagation for post-call uses of R1-R5 (they're killed)
                // - Still trace R1-R5 as read-deps when R0 is relevant-after (args affect return)
                for (uint8_t i = 1; i <= 5; ++i) {
                    deps.regs_read.insert(Reg{i});      // Arguments feed into return value
                    deps.regs_clobbered.insert(Reg{i}); // Killed after call
                }
                deps.regs_written.insert(Reg{0}); // Return value
            } else if constexpr (std::is_same_v<T, CallLocal>) {
                // Local (macro-inlined) calls:
                // - read R1-R5 as arguments
                // - write R0 as return value
                // - clobber R1-R5 (caller-saved)
                // - adjust frame pointer R10 (save/restore callee-saved regs)
                for (uint8_t i = 1; i <= 5; ++i) {
                    deps.regs_read.insert(Reg{i});      // Arguments
                    deps.regs_clobbered.insert(Reg{i}); // Killed after call
                }
                deps.regs_written.insert(Reg{0});  // Return value
                deps.regs_read.insert(Reg{10});    // Frame pointer read
                deps.regs_written.insert(Reg{10}); // Frame pointer adjusted
            } else if constexpr (std::is_same_v<T, Callx>) {
                // Indirect call: reads the function pointer and R1-R5,
                // writes R0, and clobbers R1-R5.
                deps.regs_read.insert(v.func);
                for (uint8_t i = 1; i <= 5; ++i) {
                    deps.regs_read.insert(Reg{i});      // Arguments
                    deps.regs_clobbered.insert(Reg{i}); // Killed after call
                }
                deps.regs_written.insert(Reg{0});
            } else if constexpr (std::is_same_v<T, Exit>) {
                deps.regs_read.insert(Reg{0}); // Return value
                if (!v.stack_frame_prefix.empty()) {
                    // Subprogram return restores callee-saved registers and frame pointer.
                    for (uint8_t i = R6; i <= R9; ++i) {
                        deps.regs_written.insert(Reg{i});
                    }
                    deps.regs_written.insert(Reg{R10_STACK_POINTER});
                }
            } else if constexpr (std::is_same_v<T, Jmp>) {
                if (v.cond) {
                    deps.regs_read.insert(v.cond->left);
                    if (const auto* reg = std::get_if<Reg>(&v.cond->right)) {
                        deps.regs_read.insert(*reg);
                    }
                }
            } else if constexpr (std::is_same_v<T, Assume>) {
                deps.regs_read.insert(v.cond.left);
                if (const auto* reg = std::get_if<Reg>(&v.cond.right)) {
                    deps.regs_read.insert(*reg);
                }
            } else if constexpr (std::is_same_v<T, Packet>) {
                if (v.regoffset) {
                    deps.regs_read.insert(*v.regoffset);
                }
                deps.regs_written.insert(Reg{0}); // Result in R0
                // Packet clobbers caller-saved registers R1-R5 without reading them
                for (uint8_t i = 1; i <= 5; ++i) {
                    deps.regs_clobbered.insert(Reg{i});
                }
            }
            // Undefined and IncrementLoopCounter have no register deps
        },
        ins);

    return deps;
}
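// Illustrative sketch (not part of this file): the deps extracted for two common cases,
// assuming EBPF_TOTAL_STACK_SIZE is 4096 for the arithmetic:
//
//   *(u64 *)(r10 - 8) = r1          // store through the frame pointer
//     -> regs_read     = {r1, r10}
//        stack_written = {-8}
//
//   r0 = *(u64 *)(r2 + 0)           // load through a derived stack pointer, where the
//                                   // pre-state knows r2 has absolute stack offset 4088
//     -> regs_read     = {r2}
//        regs_written  = {r0}
//        stack_read    = {0 + (4088 - 4096)} = {-8}
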

/// Extract the registers referenced by an assertion.
/// Used to seed backward slicing from the point of failure.
std::set<Reg> extract_assertion_registers(const Assertion& assertion) {
    return std::visit(
        [](const auto& a) -> std::set<Reg> {
            using T = std::decay_t<decltype(a)>;

            if constexpr (std::is_same_v<T, Comparable>) {
                return {a.r1, a.r2};
            } else if constexpr (std::is_same_v<T, Addable>) {
                return {a.ptr, a.num};
            } else if constexpr (std::is_same_v<T, ValidDivisor>) {
                return {a.reg};
            } else if constexpr (std::is_same_v<T, ValidAccess>) {
                std::set<Reg> regs{a.reg};
                if (const auto* r = std::get_if<Reg>(&a.width)) {
                    regs.insert(*r);
                }
                return regs;
            } else if constexpr (std::is_same_v<T, ValidStore>) {
                return {a.mem, a.val};
            } else if constexpr (std::is_same_v<T, ValidSize>) {
                return {a.reg};
            } else if constexpr (std::is_same_v<T, ValidMapKeyValue>) {
                return {a.access_reg, a.map_fd_reg};
            } else if constexpr (std::is_same_v<T, ValidCall>) {
                // ValidCall checks function validity, no direct register deps
                return {};
            } else if constexpr (std::is_same_v<T, TypeConstraint>) {
                return {a.reg};
            } else if constexpr (std::is_same_v<T, FuncConstraint>) {
                return {a.reg};
            } else if constexpr (std::is_same_v<T, ZeroCtxOffset>) {
                return {a.reg};
            } else if constexpr (std::is_same_v<T, BoundedLoopCount>) {
                // Loop counter, not a register
                return {};
            }
            return {};
        },
        assertion);
}

std::vector<FailureSlice> AnalysisResult::compute_failure_slices(const Program& prog, const SliceParams params) const {
    const auto max_steps = params.max_steps;
    const auto max_slices = params.max_slices;
    std::vector<FailureSlice> slices;

    // Find all labels with errors
    for (const auto& [label, inv_pair] : invariants) {
        if (inv_pair.pre.is_bottom()) {
            continue; // Unreachable
        }
        if (!inv_pair.error) {
            continue; // No error here
        }

        // Check if we've reached the max slices limit
        if (max_slices > 0 && slices.size() >= max_slices) {
            break;
        }

        FailureSlice slice{
            .failing_label = label,
            .error = *inv_pair.error,
            .relevance = {},
        };

        // Seed relevant registers from the actual failing assertion.
        // Forward analysis stops at the first failing assertion, which may not be
        // assertions[0]. Replay the checks against the pre-state to identify
        // the actual failing assertion and seed relevance from it.
        RelevantState initial_relevance;
        const auto& assertions = prog.assertions_at(label);
        bool found_failing = false;
        for (const auto& assertion : assertions) {
            if (ebpf_domain_check(inv_pair.pre, assertion, label)) {
                for (const auto& reg : extract_assertion_registers(assertion)) {
                    initial_relevance.registers.insert(reg);
                }
                found_failing = true;
                break;
            }
        }
        // Fallback: if no failing assertion was identified (shouldn't happen),
        // or if the failing assertion has no register deps, aggregate all assertions.
        if (!found_failing || initial_relevance.registers.empty()) {
            for (const auto& assertion : assertions) {
                for (const auto& reg : extract_assertion_registers(assertion)) {
                    initial_relevance.registers.insert(reg);
                }
            }
        }

        // Always include the failing label in the slice, even if no registers were extracted
        // (e.g., ValidCall, BoundedLoopCount assertions have no register deps)

        // `visited` tracks all explored labels for deduplication during backward traversal.
        // `slice_labels` tracks only labels that interact with relevant registers (the output slice).
        std::map<Label, RelevantState> visited;
        std::set<Label> conservative_visited; // Dedup for empty-relevance labels in conservative mode
        std::map<Label, RelevantState> slice_labels;

        // Worklist: (label, relevant_state_after_this_label)
        std::vector<std::pair<Label, RelevantState>> worklist;
        worklist.emplace_back(label, initial_relevance);

        // When the seed has no register/stack deps (e.g., BoundedLoopCount),
        // perform a conservative backward walk so the slice still shows the
        // loop structure and control flow leading to the failure.
        const bool conservative_mode = initial_relevance.registers.empty() && initial_relevance.stack_offsets.empty();

        size_t steps = 0;

        // Hoist the parent lookup for the failing label outside the hot loop;
        // it is invariant and parents_of() may return a temporary.
        const auto parents_of_fail = prog.cfg().parents_of(label);

        while (!worklist.empty() && steps < max_steps) {
            auto [current_label, relevant_after] = worklist.back();
            worklist.pop_back();

            // Skip if nothing is relevant — unless we're in conservative mode
            // (empty-seed assertions like BoundedLoopCount) or this is the failing label.
            if (!conservative_mode && current_label != label && relevant_after.registers.empty() &&
                relevant_after.stack_offsets.empty()) {
                continue;
            }

            // Merge with existing relevance at this label (for deduplication)
            auto& existing = visited[current_label];
            const size_t prev_size = existing.registers.size() + existing.stack_offsets.size();
            existing.registers.insert(relevant_after.registers.begin(), relevant_after.registers.end());
            existing.stack_offsets.insert(relevant_after.stack_offsets.begin(), relevant_after.stack_offsets.end());
            const size_t new_size = existing.registers.size() + existing.stack_offsets.size();

            // If no new relevance was added, skip (already processed with same or broader relevance).
            // In conservative mode with empty relevance, use a separate visited set for dedup.
            if (new_size == prev_size) {
                if (prev_size > 0) {
                    continue;
                }
                // Empty relevance (conservative mode): skip if we already visited this label
                if (!conservative_visited.insert(current_label).second) {
                    continue;
                }
            }

            // Compute what's relevant BEFORE this instruction using deps
            RelevantState relevant_before;
            const auto inv_it = invariants.find(current_label);
            if (inv_it != invariants.end() && inv_it->second.deps) {
                const auto& deps = *inv_it->second.deps;

                // Start with what's relevant after
                relevant_before = relevant_after;

                // Remove registers that are written by this instruction
                // (they weren't relevant before their definition)
                for (const auto& reg : deps.regs_written) {
                    relevant_before.registers.erase(reg);
                }

                // Remove registers that are clobbered (killed without reading).
                // These stop propagation for post-instruction uses but don't add read-deps.
                for (const auto& reg : deps.regs_clobbered) {
                    relevant_before.registers.erase(reg);
                }

                // Determine if this instruction contributes to the slice.
                // An instruction contributes when it writes to a relevant register/stack slot,
                // or when it is a control-flow decision (Jmp/Assume) that reads relevant registers.
                bool instruction_contributes = false;
                for (const auto& reg : deps.regs_written) {
                    if (relevant_after.registers.contains(reg)) {
                        instruction_contributes = true;
                        break;
                    }
                }
                for (const auto& offset : deps.stack_written) {
                    if (relevant_after.stack_offsets.contains(offset)) {
                        instruction_contributes = true;
                        break;
                    }
                }

                // Control-flow instructions (Jmp/Assume) that read relevant registers
                // contribute to the slice because they shape the path to the failure.
                if (!instruction_contributes) {
                    const auto& ins = prog.instruction_at(current_label);
                    if (std::holds_alternative<Jmp>(ins) || std::holds_alternative<Assume>(ins)) {
                        for (const auto& reg : deps.regs_read) {
                            if (relevant_after.registers.contains(reg)) {
                                instruction_contributes = true;
                                break;
                            }
                        }
                    }
                }

                // Immediate path guard: when the current label is a direct predecessor
                // of the failing label and is an Assume instruction, its condition
                // registers are causally relevant — they determine reachability.
                if (std::holds_alternative<Assume>(prog.instruction_at(current_label))) {
                    if (std::find(parents_of_fail.begin(), parents_of_fail.end(), current_label) !=
                        parents_of_fail.end()) {
                        instruction_contributes = true;
                    }
                }

                // At the failing label, the assertion depends on registers the instruction
                // reads (e.g., base pointer r3 in a store). Since stores write to memory
                // not registers, instruction_contributes would be false without this.
                if (current_label == label) {
                    instruction_contributes = true;
                }

                // In conservative mode (empty seed, e.g., BoundedLoopCount), include all
                // reachable labels so the slice shows the loop structure and control flow.
                if (conservative_mode) {
                    instruction_contributes = true;
                }

                if (instruction_contributes) {
                    for (const auto& reg : deps.regs_read) {
                        relevant_before.registers.insert(reg);
                    }
                    for (const auto& offset : deps.stack_read) {
                        relevant_before.stack_offsets.insert(offset);
                    }
                }

                // Remove stack locations that are written by this instruction,
                // but preserve offsets that are also read (read-modify-write, e.g., Atomic).
                // Done before storing to slice_labels for consistency with register handling
                // (written registers are removed before storage at lines 476-478).
                for (const auto& offset : deps.stack_written) {
                    if (!deps.stack_read.contains(offset)) {
                        relevant_before.stack_offsets.erase(offset);
                    }
                }

                if (instruction_contributes) {
                    // Only include contributing labels in the output slice.
                    // Store relevant_before so pre-invariant filtering shows the
                    // instruction's read-deps (the true upstream dependencies).
                    slice_labels[current_label] = relevant_before;
                }
            } else {
                // No deps available: conservatively treat this label as contributing
                // and propagate all current relevance to predecessors.
                relevant_before = relevant_after;
                slice_labels[current_label] = relevant_before;
            }

            // Add predecessors to worklist
            for (const auto& parent : prog.cfg().parents_of(current_label)) {
                worklist.emplace_back(parent, relevant_before);
            }

            ++steps;
        }

        // Expand join points: for any traversed label that is a join point
        // (≥2 predecessors) where at least one predecessor is already in the slice,
        // add the join-point label itself and all predecessors that have invariants.
        // This ensures the causal trace shows converging paths that cause precision loss.
        // Note: predecessors may not be in `visited` if the worklist budget was exhausted
        // before reaching them, so we check the invariant map directly.
        std::map<Label, RelevantState> join_expansion;
        for (const auto& [v_label, v_relevance] : visited) {
            const auto& parents = prog.cfg().parents_of(v_label);
            if (parents.size() < 2) {
                continue;
            }
            // Check that at least one predecessor is in the slice (this join is relevant)
            bool has_slice_parent = false;
            for (const auto& parent : parents) {
                if (slice_labels.contains(parent)) {
                    has_slice_parent = true;
                    break;
                }
            }
            if (!has_slice_parent) {
                continue;
            }
            // Include the join-point label itself so the printing code can display
            // per-predecessor state at this join.
            if (!slice_labels.contains(v_label)) {
                join_expansion[v_label] = v_relevance;
            }
            // Include all predecessors so the join display is complete.
            // Use visited relevance if available, otherwise use the join-point's relevance.
            for (const auto& parent : parents) {
                if (slice_labels.contains(parent) || join_expansion.contains(parent)) {
                    continue;
                }
                const auto& rel = visited.contains(parent) ? visited.at(parent) : v_relevance;
                join_expansion[parent] = rel;
            }
        }
        slice_labels.insert(join_expansion.begin(), join_expansion.end());

        // Build the slice from contributing labels only
        slice.relevance = std::move(slice_labels);
        slices.push_back(std::move(slice));
    }

    return slices;
}

} // namespace prevail
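
// Illustrative sketch (not part of this file): how a caller might drive the entry
// points above, assuming SliceParams and FailureSlice are aggregates with the members
// used in compute_failure_slices(), and that `result` is an AnalysisResult for `prog`:
//
//   if (const auto err = result.find_first_error()) {
//       const auto slices = result.compute_failure_slices(prog, SliceParams{.max_steps = 1000, .max_slices = 1});
//       for (const auto& slice : slices) {
//           // slice.failing_label, slice.error and slice.relevance (Label -> RelevantState)
//           // drive the diagnostic output; streaming invariant_filter{&slice.relevance.at(l)}
//           // before printing the invariant at label l restricts it to relevant constraints.
//       }
//   }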