• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

Alan-Jowett / ebpf-verifier / 22235569093

20 Feb 2026 02:12AM UTC coverage: 88.002% (-0.2%) from 88.157%
22235569093

push

github

web-flow
Handle Call builtins: fix handling of Falco tests (#1025)

* falco: fix raw_tracepoint privilege and group expected failures

Mark raw_tracepoint/raw_tracepoint_writable as privileged program types so Falco raw-tracepoint sections are not treated as unprivileged argument checks.

Update Falco sample matrix to move now-passing sections out of TEST_SECTION_FAIL and group the remaining expected failures by root-cause class (offset lower-bound loss vs size lower-bound loss at correlated joins).

Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

* elf/unmarshal: gate builtin relocations via platform call model

Add platform hooks to resolve builtin symbols and provide builtin call contracts, thread relocation-gated builtin call offsets through ProgramInfo, and only treat static helper IDs as builtins at gated call sites.

Also extend platform-table, marshal, and YAML-platform tests to cover builtin resolver wiring and call unmarshal behavior.
* crab: canonicalize unsigned intervals in bitwise_and
When uvalue intervals temporarily carry signed lower bounds (e.g. after joins), Interval::bitwise_and asserted in debug builds. Canonicalize both operands via zero_extend(64) before unsigned bitwise reasoning, preserving soundness and avoiding debug aborts.

Validated by reproducing SIGABRT on reverted code in [falco][verify] and confirming the patched build completes with expected 73 pass / 20 failed-as-expected.

* Fix unsound bitwise_and case for non-singleton all-ones rhs

Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

239 of 252 new or added lines in 9 files covered. (94.84%)

602 existing lines in 16 files now uncovered.

11743 of 13344 relevant lines covered (88.0%)

3262592.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

91.39
/src/crab/ebpf_transformer.cpp
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3

4
// This file is eBPF-specific, not derived from CRAB.
5

6
#include <bitset>
7
#include <cassert>
8
#include <optional>
9
#include <utility>
10
#include <vector>
11

12
#include "boost/endian/conversion.hpp"
13

14
#include "arith/dsl_syntax.hpp"
15
#include "config.hpp"
16
#include "crab/array_domain.hpp"
17
#include "crab/ebpf_domain.hpp"
18
#include "crab/var_registry.hpp"
19
#include "crab_utils/num_safety.hpp"
20
#include "ir/unmarshal.hpp"
21
#include "platform.hpp"
22
#include "string_constraints.hpp"
23

24
namespace prevail {
25
// Abstract transformer for eBPF instructions: each operator() overload
// applies the effect of one instruction kind to the wrapped EbpfDomain.
// Intended for use with std::visit over the Instruction variant.
class EbpfTransformer final {
    EbpfDomain& dom;
    // shorthands:
    ArrayDomain& stack;

  public:
    explicit EbpfTransformer(EbpfDomain& _dom) : dom(_dom), stack(_dom.stack) {}

    // abstract transformers
    void operator()(const Assume&);

    void operator()(const Atomic&);

    void operator()(const Bin&);

    void operator()(const Call&);

    void operator()(const CallLocal&);

    void operator()(const Callx&);
    void operator()(const CallBtf&);

    void operator()(const Exit&);

    void operator()(const IncrementLoopCounter&);

    void operator()(const Jmp&) const;

    void operator()(const LoadMapFd&);

    void operator()(const LoadMapAddress&);
    void operator()(const LoadPseudo&);

    void operator()(const Mem&);

    void operator()(const Packet&);

    void operator()(const Un&);

    void operator()(const Undefined&);

    void initialize_loop_counter(const Label& label);

  private:
    /// Havoc the caller-saved registers r1-r5 (their contents are
    /// unspecified after a call).
    void scratch_caller_saved_registers();

    /// Snapshot r6-r9 into `prefix`-scoped stack-frame variables.
    void save_callee_saved_registers(const std::string& prefix);

    /// Restore r6-r9 from the `prefix`-scoped stack-frame variables.
    void restore_callee_saved_registers(const std::string& prefix);

    /// Invalidate the stack region of the subprogram frame being exited.
    void havoc_subprogram_stack(const std::string& prefix);

    void forget_packet_pointers();

    void do_load_mapfd(const Reg& dst_reg, int mapfd, bool maybe_null);

    void do_load_map_address(const Reg& dst_reg, int mapfd, int32_t offset);

    void assign_valid_ptr(const Reg& dst_reg, bool maybe_null);

    static void recompute_stack_numeric_size(TypeToNumDomain& state, ArrayDomain& stack, const Reg& reg);

    static void recompute_stack_numeric_size(TypeToNumDomain& state, const ArrayDomain& stack, Variable type_variable);

    static void do_load_stack(TypeToNumDomain& state, ArrayDomain& stack, const Reg& target_reg,
                              const LinearExpression& addr, int width, const Reg& src_reg);

    void do_load(const Mem& b, const Reg& target_reg);

    static void do_store_stack(TypeToNumDomain& state, ArrayDomain& stack, const LinearExpression& symb_addr,
                               int exact_width, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                               const std::optional<Reg>& opt_val_reg);

    void do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                      const std::optional<Reg>& opt_val_reg);

    void add(const Reg& dst_reg, int imm, int finite_width);

    void shl(const Reg& dst_reg, int imm, int finite_width);

    void lshr(const Reg& dst_reg, int imm, int finite_width);

    void ashr(const Reg& dst_reg, const LinearExpression& right_svalue, int finite_width);
}; // end class EbpfTransformer
110

111
/// Apply instruction `ins` to abstract state `inv`, in place.
/// A non-Assume instruction must never turn a non-bottom state into bottom;
/// if it does, the analysis itself is buggy, so abort with an exception.
void ebpf_domain_transform(EbpfDomain& inv, const Instruction& ins) {
    if (inv.is_bottom()) {
        return; // nothing to transform
    }
    const auto snapshot = inv; // kept only for the diagnostic below
    std::visit(EbpfTransformer{inv}, ins);
    if (!inv.is_bottom() || std::holds_alternative<Assume>(ins)) {
        return;
    }
    // Fail. raise an exception to stop the analysis.
    std::stringstream diagnostic;
    diagnostic << "Bug! pre-invariant:\n"
               << snapshot << "\n followed by instruction: " << ins << "\n"
               << "leads to bottom";
    throw std::logic_error(diagnostic.str());
}
126

127
void EbpfTransformer::scratch_caller_saved_registers() {
17,808✔
128
    for (uint8_t i = R1_ARG; i <= R5_ARG; i++) {
106,848✔
129
        dom.state.havoc_register(Reg{i});
89,040✔
130
    }
131
}
17,808✔
132

133
/// Create variables specific to the new call stack frame that store
/// copies of the states of r6 through r9.
void EbpfTransformer::save_callee_saved_registers(const std::string& prefix) {
    // TODO: define location `stack_frame_var`, and pass to dom.state.assign().
    //       Similarly in restore_callee_saved_registers
    for (const Reg r : {Reg{R6}, Reg{R7}, Reg{R8}, Reg{R9}}) {
        if (dom.state.is_initialized(r)) {
            const Variable type_var = variable_registry->type_reg(r.v);
            // Snapshot the register's type into a frame-scoped variable.
            dom.state.assign_type(variable_registry->stack_frame_var(DataKind::types, r.v, prefix), type_var);
            for (const TypeEncoding type : dom.state.iterate_types(r)) {
                // Copy the kinds relevant to this type, plus both numeric views.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->reg(kind, r.v);
                    const Variable dst_var = variable_registry->stack_frame_var(kind, r.v, prefix);
                    // Only copy variables that carry information; a top
                    // interval adds no constraint worth preserving.
                    if (!dom.state.values.eval_interval(src_var).is_top()) {
                        dom.state.values.assign(dst_var, src_var);
                    }
                }
            }
        }
    }
}
157

158
/// Restore r6-r9 from the frame-scoped copies made by
/// save_callee_saved_registers(), then discard those copies.
void EbpfTransformer::restore_callee_saved_registers(const std::string& prefix) {
    for (uint8_t r = R6; r <= R9; r++) {
        Reg reg{r};
        const Variable type_var = variable_registry->stack_frame_var(DataKind::types, r, prefix);
        if (dom.state.is_initialized(type_var)) {
            dom.state.assign_type(reg, type_var);
            for (const TypeEncoding type : dom.state.iterate_types(reg)) {
                // Same kind list as in save_callee_saved_registers.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->stack_frame_var(kind, r, prefix);
                    const Variable dst_var = variable_registry->reg(kind, r);
                    if (!dom.state.values.eval_interval(src_var).is_top()) {
                        dom.state.values.assign(dst_var, src_var);
                    } else {
                        // No saved information: the register value is unknown.
                        dom.state.values.havoc(dst_var);
                    }
                    // The frame copy is dead after the restore.
                    dom.state.values.havoc(src_var);
                }
            }
        }
        // Drop the frame-scoped type variable whether or not it was set.
        dom.state.havoc_type(type_var);
    }
}
183

184
void EbpfTransformer::havoc_subprogram_stack(const std::string& prefix) {
62✔
185
    const Variable r10_stack_offset = reg_pack(R10_STACK_POINTER).stack_offset;
62✔
186
    const auto intv = dom.state.values.eval_interval(r10_stack_offset);
62✔
187
    if (!intv.is_singleton()) {
62✔
188
        return;
34✔
189
    }
190
    const int64_t stack_start = intv.singleton()->cast_to<int64_t>() - EBPF_SUBPROGRAM_STACK_SIZE;
28✔
191
    stack.havoc_type(dom.state.types, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
28✔
192
    for (const DataKind kind : iterate_kinds()) {
420✔
193
        stack.havoc(dom.state.values, kind, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
392✔
194
    }
28✔
195
}
196

197
void EbpfTransformer::forget_packet_pointers() {
40✔
198
    dom.state.havoc_all_locations_having_type(T_PACKET);
40✔
199
    dom.initialize_packet();
40✔
200
}
40✔
201

202
/** Linear constraint for a pointer comparison.
203
 */
204
static LinearConstraint assume_cst_offsets_reg(const Condition::Op op, const Variable dst_offset,
1,880✔
205
                                               const Variable src_offset) {
206
    using namespace dsl_syntax;
940✔
207
    using Op = Condition::Op;
940✔
208
    switch (op) {
1,880✔
209
    case Op::EQ: return eq(dst_offset, src_offset);
6✔
210
    case Op::NE: return neq(dst_offset, src_offset);
10✔
211
    case Op::GE: return dst_offset >= src_offset;
6✔
212
    case Op::SGE: return dst_offset >= src_offset; // pointer comparison is unsigned
×
213
    case Op::LE: return dst_offset <= src_offset;
1,392✔
214
    case Op::SLE: return dst_offset <= src_offset; // pointer comparison is unsigned
×
215
    case Op::GT: return dst_offset > src_offset;
1,392✔
216
    case Op::SGT: return dst_offset > src_offset; // pointer comparison is unsigned
×
217
    case Op::SLT: return src_offset > dst_offset;
×
218
    // Note: reverse the test as a workaround strange lookup:
219
    case Op::LT: return src_offset > dst_offset; // FIX unsigned
6✔
220
    default: return dst_offset - dst_offset == 0;
×
221
    }
222
}
223

224
/// Refine the abstract state with the branch condition `s.cond`.
void EbpfTransformer::operator()(const Assume& s) {
    if (dom.is_bottom()) {
        return;
    }
    const Condition cond = s.cond;
    const auto dst = reg_pack(cond.left);
    if (const auto psrc_reg = std::get_if<Reg>(&cond.right)) {
        // Register-register comparison.
        const auto src_reg = *psrc_reg;
        const auto src = reg_pack(src_reg);
        // This should have been checked by EbpfChecker
        assert(dom.state.same_type(cond.left, std::get<Reg>(cond.right)));
        dom.state = dom.state.join_over_types(cond.left, [&](TypeToNumDomain& state, const TypeEncoding type) {
            if (type == T_NUM) {
                // Numeric comparison: constrain both signed and unsigned views.
                for (const LinearConstraint& cst :
                     state.values->assume_cst_reg(cond.op, cond.is64, dst.svalue, dst.uvalue, src.svalue, src.uvalue)) {
                    state.values.add_constraint(cst);
                }
            } else {
                // Either pointers to a singleton region,
                // or an equality comparison on map descriptors/pointers to non-singleton locations
                if (const auto dst_offset = get_type_offset_variable(cond.left, type)) {
                    if (const auto src_offset = get_type_offset_variable(src_reg, type)) {
                        state.values.add_constraint(
                            assume_cst_offsets_reg(cond.op, dst_offset.value(), src_offset.value()));
                    }
                }
            }
        });
    } else {
        // Register-immediate comparison: constrain both numeric views.
        const int64_t imm = gsl::narrow_cast<int64_t>(std::get<Imm>(cond.right).v);
        for (const LinearConstraint& cst :
             dom.state.values->assume_cst_imm(cond.op, cond.is64, dst.svalue, dst.uvalue, imm)) {
            dom.state.values.add_constraint(cst);
        }
    }
}
260

261
void EbpfTransformer::operator()(const Undefined& a) {}
2,757✔
262

263
// Rejected before abstract interpretation by cfg_builder::check_instruction_feature_support.
void EbpfTransformer::operator()(const CallBtf&) {
    // Debug-build guard: reaching this overload means the CFG builder's
    // feature filtering failed upstream.
    assert(false && "CallBtf should be rejected before abstract transformation");
}
267

268
// Combine two 32-bit immediates into one 64-bit value: `lo` occupies the low
// word and `hi` the high word, each taken as its unsigned bit pattern (no
// sign extension).
static uint64_t merge_imm32_to_u64(const int32_t lo, const int32_t hi) {
    const uint64_t low_word = static_cast<uint32_t>(lo);
    const uint64_t high_word = static_cast<uint32_t>(hi);
    return (high_word << 32) | low_word;
}
271

272
/// Materialize a pseudo-load. Only CODE_ADDR is expected here; all other
/// kinds are resolved before abstract interpretation.
void EbpfTransformer::operator()(const LoadPseudo& pseudo) {
    if (pseudo.addr.kind != PseudoAddress::Kind::CODE_ADDR) {
        // VARIABLE_ADDR / MAP_BY_IDX / MAP_VALUE_BY_IDX must never reach
        // the transformer.
        assert(false && "unexpected LoadPseudo kind during abstract transformation");
        return;
    }
    const auto dst = reg_pack(pseudo.dst);
    const uint64_t imm64 = merge_imm32_to_u64(pseudo.addr.imm, pseudo.addr.next_imm);
    // The destination holds a 64-bit code address: same constant in both the
    // signed and unsigned views, typed as a function value with no offsets.
    dom.state.values.assign(dst.svalue, to_signed(imm64));
    dom.state.values.assign(dst.uvalue, imm64);
    dom.state.values->overflow_bounds(dst.uvalue, 64, false);
    dom.state.assign_type(pseudo.dst, T_FUNC);
    dom.state.havoc_offsets(pseudo.dst);
}
291

292
// Identity with the same call shape as the endian-reversal helpers, so
// swap_endianness() can truncate via the parameter conversion to T.
template <class T>
constexpr T truncate(T value) noexcept {
    return value;
}
297

298
/// Apply a unary ALU operation (byte swaps and negation) to stmt.dst.
void EbpfTransformer::operator()(const Un& stmt) {
    if (dom.is_bottom()) {
        return;
    }
    const auto dst = reg_pack(stmt.dst);
    // Apply `transform` to variable `v` when the register is a known number
    // whose value fits in 64 bits; otherwise drop all knowledge of `v` and
    // of the register's offsets.
    auto swap_endianness = [&](const Variable v, auto transform) {
        if (dom.state.is_in_group(stmt.dst, TS_NUM)) {
            if (const auto num = dom.state.values.eval_interval(v).singleton()) {
                if (num->fits_cast_to<int64_t>()) {
                    dom.state.values.set(v, Interval{transform(num->cast_to<int64_t>())});
                    return;
                }
            }
        }
        dom.state.values.havoc(v);
        dom.state.havoc_offsets(stmt.dst);
    };
    // Transform the signed and unsigned views with (possibly distinct) functors.
    auto transform_both = [&](auto signed_fn, auto unsigned_fn) {
        swap_endianness(dst.svalue, signed_fn);
        swap_endianness(dst.uvalue, unsigned_fn);
    };
    // Swap bytes if needed.  For 64-bit types we need the weights to fit in a
    // signed int64, but for smaller types we don't want sign extension,
    // so we use unsigned which still fits in a signed int64.
    switch (stmt.op) {
    case Un::Op::BE16:
        if (thread_local_options.big_endian) {
            transform_both(truncate<uint16_t>, truncate<uint16_t>);
        } else {
            transform_both(boost::endian::endian_reverse<uint16_t>, boost::endian::endian_reverse<uint16_t>);
        }
        break;
    case Un::Op::BE32:
        if (thread_local_options.big_endian) {
            transform_both(truncate<uint32_t>, truncate<uint32_t>);
        } else {
            transform_both(boost::endian::endian_reverse<uint32_t>, boost::endian::endian_reverse<uint32_t>);
        }
        break;
    case Un::Op::BE64:
        // On a big-endian target BE64 is a no-op (and no truncation occurs).
        if (!thread_local_options.big_endian) {
            transform_both(boost::endian::endian_reverse<int64_t>, boost::endian::endian_reverse<uint64_t>);
        }
        break;
    case Un::Op::LE16:
        if (thread_local_options.big_endian) {
            transform_both(boost::endian::endian_reverse<uint16_t>, boost::endian::endian_reverse<uint16_t>);
        } else {
            transform_both(truncate<uint16_t>, truncate<uint16_t>);
        }
        break;
    case Un::Op::LE32:
        if (thread_local_options.big_endian) {
            transform_both(boost::endian::endian_reverse<uint32_t>, boost::endian::endian_reverse<uint32_t>);
        } else {
            transform_both(truncate<uint32_t>, truncate<uint32_t>);
        }
        break;
    case Un::Op::LE64:
        // On a little-endian target LE64 is a no-op.
        if (thread_local_options.big_endian) {
            transform_both(boost::endian::endian_reverse<int64_t>, boost::endian::endian_reverse<uint64_t>);
        }
        break;
    case Un::Op::SWAP16:
        transform_both(boost::endian::endian_reverse<uint16_t>, boost::endian::endian_reverse<uint16_t>);
        break;
    case Un::Op::SWAP32:
        transform_both(boost::endian::endian_reverse<uint32_t>, boost::endian::endian_reverse<uint32_t>);
        break;
    case Un::Op::SWAP64:
        transform_both(boost::endian::endian_reverse<int64_t>, boost::endian::endian_reverse<uint64_t>);
        break;
    case Un::Op::NEG:
        // Arithmetic negation is delegated to the numeric domain.
        dom.state.values->neg(dst.svalue, dst.uvalue, stmt.is64 ? 64 : 32);
        dom.state.havoc_offsets(stmt.dst);
        break;
    }
}
385

386
/// On exit from a subprogram, discard its stack frame and restore the
/// caller's r6-r9 and r10. Exiting the outermost program is a no-op here.
void EbpfTransformer::operator()(const Exit& a) {
    if (dom.is_bottom()) {
        return;
    }
    // Clean up any state for the current stack frame.
    // Bind by reference: no need to copy the prefix string.
    const std::string& prefix = a.stack_frame_prefix;
    if (prefix.empty()) {
        // Empty prefix means we are exiting the program itself, not a
        // subprogram: there is no caller frame to restore.
        return;
    }
    havoc_subprogram_stack(prefix);
    restore_callee_saved_registers(prefix);

    // Restore r10.
    constexpr Reg r10_reg{R10_STACK_POINTER};
    add(r10_reg, EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
402

403
void EbpfTransformer::operator()(const Jmp&) const {
    // Intentionally empty: the jump instruction itself is a NOP; it only
    // exists to hold the jump preconditions, so the state flows unchanged.
}
406

407
/// Packet-access instruction: r0 becomes an unknown number and the
/// caller-saved registers r1-r5 are clobbered. The parameter is unnamed
/// (it was previously named but unused), matching the other overloads.
void EbpfTransformer::operator()(const Packet&) {
    if (dom.is_bottom()) {
        return;
    }
    constexpr Reg r0_reg{R0_RETURN_VALUE};
    // Drop everything known about r0 except its type slot, then retype it.
    dom.state.havoc_register_except_type(r0_reg);
    dom.state.assign_type(r0_reg, T_NUM);
    scratch_caller_saved_registers();
}
416

417
/// Load `width` bytes from the stack at `symb_addr` into `target_reg`.
/// `src_reg` is the base register the address was computed from.
void EbpfTransformer::do_load_stack(TypeToNumDomain& state, ArrayDomain& stack, const Reg& target_reg,
                                    const LinearExpression& symb_addr, const int width, const Reg& src_reg) {
    const Interval addr = state.values.eval_interval(symb_addr);
    using namespace dsl_syntax;
    if (state.values.entail(width <= reg_pack(src_reg).stack_numeric_size)) {
        // The whole read lies inside the region known to hold numbers.
        state.assign_type(target_reg, T_NUM);
    } else {
        state.assign_type(target_reg, stack.load_type(addr, width));
        if (!state.is_initialized(target_reg)) {
            // We don't know what we loaded, so just havoc the destination register.
            state.havoc_register(target_reg);
            return;
        }
    }

    const RegPack& target = reg_pack(target_reg);
    if (width == 1 || width == 2 || width == 4 || width == 8) {
        // Use the addr before we havoc the destination register since we might be getting the
        // addr from that same register.
        const std::optional<LinearExpression> sresult = stack.load(state.values, DataKind::svalues, addr, width);
        const std::optional<LinearExpression> uresult = stack.load(state.values, DataKind::uvalues, addr, width);
        state.havoc_register_except_type(target_reg);
        state.values.assign(target.svalue, sresult);
        state.values.assign(target.uvalue, uresult);
        // Also carry over the per-type auxiliary kinds for the loaded type(s).
        for (const TypeEncoding type : state.iterate_types(target_reg)) {
            for (const auto& kind : type_to_kinds.at(type)) {
                const Variable dst_var = variable_registry->reg(kind, target_reg.v);
                state.values.assign(dst_var, stack.load(state.values, kind, addr, width));
            }
        }
    } else {
        // Unsupported width: the loaded value is unknown.
        state.havoc_register_except_type(target_reg);
    }
}
451

452
/// Load `width` bytes from the context object at `addr_vague` into
/// `target_reg`, giving special treatment to the packet-pointer fields
/// (data, data_end, meta) described by the program type's context descriptor.
static void do_load_ctx(TypeToNumDomain& state, const Reg& target_reg, const LinearExpression& addr_vague,
                        const int width) {
    using namespace dsl_syntax;
    if (state.values.is_bottom()) {
        return;
    }

    const ebpf_context_descriptor_t* desc = thread_local_program_info->type.context_descriptor;

    const RegPack& target = reg_pack(target_reg);

    if (desc->end < 0) {
        // This context exposes no packet pointers; any load yields a number.
        state.havoc_register(target_reg);
        state.assign_type(target_reg, T_NUM);
        return;
    }

    const Interval interval = state.values.eval_interval(addr_vague);
    const std::optional<Number> maybe_addr = interval.singleton();
    state.havoc_register(target_reg);

    // Could this load alias one of the special pointer fields?
    const bool may_touch_ptr =
        interval.contains(desc->data) || interval.contains(desc->meta) || interval.contains(desc->end);

    if (!maybe_addr) {
        // Address is not a known constant: classify conservatively.
        if (may_touch_ptr) {
            state.havoc_type(target_reg);
        } else {
            state.assign_type(target_reg, T_NUM);
        }
        return;
    }

    const Number& addr = *maybe_addr;

    // We use offsets for packet data, data_end, and meta during verification,
    // but at runtime they will be 64-bit pointers.  We can use the offset values
    // for verification like we use map_fd's as a proxy for maps which
    // at runtime are actually 64-bit memory pointers.
    const int offset_width = desc->end - desc->data;
    if (addr == desc->data) {
        if (width == offset_width) {
            // Loading ctx->data: packet pointer at offset 0.
            state.values.assign(target.packet_offset, 0);
        }
    } else if (addr == desc->end) {
        if (width == offset_width) {
            // Loading ctx->data_end: packet pointer at offset packet_size.
            state.values.assign(target.packet_offset, variable_registry->packet_size());
            // EXPERIMENTAL: Explicit upper bound since packet_size is min_only.
            // This preserves the relational constraint (packet_offset <= packet_size)
            // while ensuring comparison checks have a concrete upper bound.
            state.values.add_constraint(target.packet_offset < MAX_PACKET_SIZE);
        }
    } else if (addr == desc->meta) {
        if (width == offset_width) {
            // Loading ctx->data_meta: packet pointer at the meta offset.
            state.values.assign(target.packet_offset, variable_registry->meta_offset());
        }
    } else {
        // Any other context field: a plain number, unless the access range
        // may alias one of the pointer fields.
        if (may_touch_ptr) {
            state.havoc_type(target_reg);
        } else {
            state.assign_type(target_reg, T_NUM);
        }
        return;
    }
    if (width == offset_width) {
        state.assign_type(target_reg, T_PACKET);
        // NOTE(review): 4098 looks like the minimum assumed pointer value
        // used for packet pointers — confirm against the verifier's pointer
        // range constants.
        state.values.add_constraint(4098 <= target.svalue);
        state.values.add_constraint(target.svalue <= PTR_MAX);
    }
}
522

523
/// Model a load from packet or shared memory: contents are not tracked, so
/// the result is an unknown number, optionally range-limited by its width.
static void do_load_packet_or_shared(TypeToNumDomain& state, const Reg& target_reg, const int width,
                                     const bool is_signed) {
    if (state.values.is_bottom()) {
        return;
    }
    const RegPack& target = reg_pack(target_reg);

    state.havoc_register(target_reg);
    state.assign_type(target_reg, T_NUM);

    // Small copies can be range-limited and useful for later arithmetic.
    const int bits = width * 8;
    if (is_signed && (width == 1 || width == 2 || width == 4)) {
        state.values.set(target.svalue, Interval::signed_int(bits));
        state.values.set(target.uvalue, Interval::unsigned_int(bits));
    } else if (width == 1 || width == 2) {
        const Interval value_range = Interval::unsigned_int(bits);
        state.values.set(target.svalue, value_range);
        state.values.set(target.uvalue, value_range);
    }
}

544
void EbpfTransformer::do_load(const Mem& b, const Reg& target_reg) {
70,318✔
545
    using namespace dsl_syntax;
35,159✔
546

547
    const auto mem_reg = reg_pack(b.access.basereg);
70,318✔
548
    const int width = b.access.width;
70,318✔
549
    const int offset = b.access.offset;
70,318✔
550

551
    if (b.access.basereg.v == R10_STACK_POINTER) {
70,318✔
552
        const LinearExpression addr = mem_reg.stack_offset + offset;
41,103✔
553
        do_load_stack(dom.state, stack, target_reg, addr, width, b.access.basereg);
27,402✔
554
        return;
27,402✔
555
    }
27,402✔
556
    dom.state = dom.state.join_over_types(b.access.basereg, [&](TypeToNumDomain& state, TypeEncoding type) {
85,832✔
557
        switch (type) {
43,088✔
558
        case T_UNINIT: return;
559
        case T_MAP: return;
560
        case T_MAP_PROGRAMS: return;
561
        case T_NUM: return;
562
        case T_FUNC: return;
563
        case T_CTX: {
4,302✔
564
            LinearExpression addr = mem_reg.ctx_offset + offset;
6,453✔
565
            do_load_ctx(state, target_reg, addr, width);
4,302✔
566
            break;
4,302✔
567
        }
4,302✔
568
        case T_STACK: {
524✔
569
            LinearExpression addr = mem_reg.stack_offset + offset;
786✔
570
            do_load_stack(state, stack, target_reg, addr, width, b.access.basereg);
524✔
571
            break;
524✔
572
        }
524✔
573
        case T_PACKET: {
3,332✔
574
            LinearExpression addr = mem_reg.packet_offset + offset;
4,998✔
575
            do_load_packet_or_shared(state, target_reg, width, b.is_signed);
3,332✔
576
            break;
3,332✔
577
        }
3,332✔
578
        case T_SHARED: {
34,930✔
579
            LinearExpression addr = mem_reg.shared_offset + offset;
52,395✔
580
            do_load_packet_or_shared(state, target_reg, width, b.is_signed);
34,930✔
581
            break;
34,930✔
582
        }
34,930✔
583
        case T_SOCKET:
×
584
        case T_BTF_ID:
585
        case T_ALLOC_MEM: {
586
            // TODO: implement proper load semantics for these pointer types.
587
            // For now, treat like packet/shared (havoc the result).
UNCOV
588
            do_load_packet_or_shared(state, target_reg, width, b.is_signed);
×
UNCOV
589
            break;
×
590
        }
591
        }
592
    });
42,916✔
593
}
594

595
// Model a store of `exact_width` bytes to the stack at symbolic address `symb_addr`.
// `val_svalue` / `val_uvalue` are the signed/unsigned views of the stored value;
// `opt_val_reg` is the source register, or nullopt when an immediate is stored.
// Afterwards, repairs the stack_numeric_size bookkeeping of every stack-typed
// register whose tracked numeric prefix may overlap the written bytes.
void EbpfTransformer::do_store_stack(TypeToNumDomain& state, ArrayDomain& stack, const LinearExpression& symb_addr,
                                     const int exact_width, const LinearExpression& val_svalue,
                                     const LinearExpression& val_uvalue, const std::optional<Reg>& opt_val_reg) {
    const Interval addr = state.values.eval_interval(symb_addr);
    const Interval width{exact_width};
    // no aliasing of val - we don't move from stack to stack, so we can just havoc first
    stack.havoc_type(state.types, addr, width);
    // Forget every data kind except the numeric value cells, which are handled below.
    for (const DataKind kind : iterate_kinds()) {
        if (kind == DataKind::svalues || kind == DataKind::uvalues) {
            continue;
        }
        stack.havoc(state.values, kind, addr, width);
    }
    bool must_be_num = false;
    if (opt_val_reg && !state.is_initialized(*opt_val_reg)) {
        // Storing an uninitialized register: nothing useful can be recorded.
        stack.havoc(state.values, DataKind::svalues, addr, width);
        stack.havoc(state.values, DataKind::uvalues, addr, width);
    } else {
        // opt_val_reg is unset when storing an immediate value.
        must_be_num = !opt_val_reg || state.is_in_group(*opt_val_reg, TS_NUM);
        const LinearExpression val_type =
            must_be_num ? LinearExpression{T_NUM} : variable_registry->type_reg(opt_val_reg->v);
        state.assign_type(stack.store_type(state.types, addr, width, must_be_num), val_type);

        if (exact_width == 8) {
            // Full-width store: record both numeric views of the value.
            stack.havoc(state.values, DataKind::svalues, addr, width);
            stack.havoc(state.values, DataKind::uvalues, addr, width);
            state.values.assign(stack.store(state.values, DataKind::svalues, addr, width), val_svalue);
            state.values.assign(stack.store(state.values, DataKind::uvalues, addr, width), val_uvalue);

            if (!must_be_num) {
                // The stored value may be a pointer: copy its per-type data kinds
                // (e.g. offsets) from the source register into the stack cells.
                for (TypeEncoding type : state.iterate_types(*opt_val_reg)) {
                    for (const DataKind kind : type_to_kinds.at(type)) {
                        const Variable src_var = variable_registry->reg(kind, opt_val_reg->v);
                        state.values.assign(stack.store(state.values, kind, addr, width), src_var);
                    }
                }
            }
        } else if ((exact_width == 1 || exact_width == 2 || exact_width == 4) && must_be_num) {
            // Keep track of numbers on the stack that might be used as array indices.
            if (const auto stack_svalue = stack.store(state.values, DataKind::svalues, addr, width)) {
                state.values.assign(stack_svalue, val_svalue);
                // Clamp to the stored width (signed interpretation).
                state.values->overflow_bounds(*stack_svalue, exact_width * 8, true);
            } else {
                stack.havoc(state.values, DataKind::svalues, addr, width);
            }
            if (const auto stack_uvalue = stack.store(state.values, DataKind::uvalues, addr, width)) {
                state.values.assign(stack_uvalue, val_uvalue);
                // Clamp to the stored width (unsigned interpretation).
                state.values->overflow_bounds(*stack_uvalue, exact_width * 8, false);
            } else {
                stack.havoc(state.values, DataKind::uvalues, addr, width);
            }
        } else {
            // Any other width (or a narrow pointer store): no value tracking.
            stack.havoc(state.values, DataKind::svalues, addr, width);
            stack.havoc(state.values, DataKind::uvalues, addr, width);
        }
    }

    // Update stack_numeric_size for any stack type variables.
    // stack_numeric_size holds the number of continuous bytes starting from stack_offset that are known to be numeric.
    for (const Variable type_variable : variable_registry->get_type_variables()) {
        if (!state.is_initialized(type_variable) || !state.may_have_type(type_variable, T_STACK)) {
            continue;
        }
        const Variable stack_offset_variable = variable_registry->kind_var(DataKind::stack_offsets, type_variable);
        const Variable stack_numeric_size_variable =
            variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable);

        using namespace dsl_syntax;
        // See if the variable's numeric interval overlaps with changed bytes.
        if (state.values.intersect(
                dsl_syntax::operator<=(symb_addr, stack_offset_variable + stack_numeric_size_variable)) &&
            state.values.intersect(dsl_syntax::operator>=(symb_addr + exact_width, stack_offset_variable))) {
            if (!must_be_num) {
                // A possibly-non-numeric store may break the numeric prefix.
                state.values.havoc(stack_numeric_size_variable);
            }
            recompute_stack_numeric_size(state, stack, type_variable);
        }
    }
}
675

676
// Transformer for a memory instruction (load or store).
// Sign-extending loads (MEMSX) are modeled as a plain load followed by a
// MOVSX move of the loaded register into itself.
void EbpfTransformer::operator()(const Mem& b) {
    if (dom.is_bottom()) {
        return;
    }
    if (const auto preg = std::get_if<Reg>(&b.value)) {
        if (b.is_load) {
            do_load(b, *preg);
            if (b.is_signed) {
                Bin::Op op{};
                // MEMSX decode only allows widths 1/2/4. Programmatic Mem construction paths
                // (for example from Atomic lowering) do not set is_signed=true.
                switch (b.access.width) {
                case 1: op = Bin::Op::MOVSX8; break;
                case 2: op = Bin::Op::MOVSX16; break;
                case 4: op = Bin::Op::MOVSX32; break;
                default: CRAB_ERROR("unexpected MEMSX width", b.access.width);
                }
                // Re-dispatch through the Bin transformer to apply the sign extension.
                (*this)(Bin{.op = op, .dst = *preg, .v = *preg, .is64 = true, .lddw = false});
            }
        } else {
            // Store of a register: pass both numeric views of the source register.
            const auto data_reg = reg_pack(*preg);
            do_mem_store(b, data_reg.svalue, data_reg.uvalue, *preg);
        }
    } else {
        // Store of an immediate: no source register (empty optional).
        const uint64_t imm = std::get<Imm>(b.value).v;
        do_mem_store(b, to_signed(imm), imm, {});
    }
}
704

705
// Model a store through `b.access.basereg`. A store via r10 with a singleton
// stack offset goes directly to do_store_stack; otherwise the store is applied
// per possible pointer type of the base register, and only stack-typed
// destinations have their contents tracked.
void EbpfTransformer::do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                                   const std::optional<Reg>& opt_val_reg) {
    if (dom.is_bottom()) {
        return;
    }
    const int width = b.access.width;
    const Number offset{b.access.offset};
    if (b.access.basereg.v == R10_STACK_POINTER) {
        // Fast path: r10 is the frame pointer; when its offset is a single known
        // value, the target address is concrete.
        const auto r10_stack_offset = reg_pack(b.access.basereg).stack_offset;
        const auto r10_interval = dom.state.values.eval_interval(r10_stack_offset);
        if (r10_interval.is_singleton()) {
            const int32_t stack_offset = r10_interval.singleton()->cast_to<int32_t>();
            const Number base_addr{stack_offset};
            do_store_stack(dom.state, stack, base_addr + offset, width, val_svalue, val_uvalue, opt_val_reg);
            return;
        }
    }
    dom.state = dom.state.join_over_types(b.access.basereg, [&](TypeToNumDomain& state, const TypeEncoding type) {
        if (type == T_STACK) {
            const auto base_addr = LinearExpression(reg_pack(b.access.basereg).stack_offset);
            do_store_stack(state, stack, dsl_syntax::operator+(base_addr, offset), width, val_svalue, val_uvalue,
                           opt_val_reg);
        }
        // do nothing for any other type
    });
}
731

732
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
733
static Bin atomic_to_bin(const Atomic& a) {
128✔
734
    Bin bin{.dst = Reg{R11_ATOMIC_SCRATCH}, .v = a.valreg, .is64 = a.access.width == sizeof(uint64_t), .lddw = false};
128✔
735
    switch (a.op) {
128✔
736
    case Atomic::Op::ADD: bin.op = Bin::Op::ADD; break;
24✔
737
    case Atomic::Op::OR: bin.op = Bin::Op::OR; break;
24✔
738
    case Atomic::Op::AND: bin.op = Bin::Op::AND; break;
24✔
739
    case Atomic::Op::XOR: bin.op = Bin::Op::XOR; break;
24✔
740
    case Atomic::Op::XCHG:
16✔
741
    case Atomic::Op::CMPXCHG: bin.op = Bin::Op::MOV; break;
16✔
UNCOV
742
    default: throw std::exception();
×
743
    }
744
    return bin;
128✔
745
}
746

747
// Transformer for an atomic read-modify-write instruction.
// Atomics whose base register may point to the stack are lowered to an explicit
// load / ALU op / store sequence through the R11 scratch pseudo-register;
// non-stack targets only havoc the registers the operation updates.
void EbpfTransformer::operator()(const Atomic& a) {
    if (dom.is_bottom()) {
        return;
    }
    // Preconditions (otherwise the checker reports the error; nothing to model here).
    if (!dom.state.is_in_group(a.access.basereg, TS_POINTER) || !dom.state.is_in_group(a.valreg, TS_NUM)) {
        return;
    }
    if (!dom.state.may_have_type(a.access.basereg, T_STACK)) {
        // Shared memory regions are volatile so we can just havoc
        // any register that will be updated.
        if (a.op == Atomic::Op::CMPXCHG) {
            dom.state.havoc_register_except_type(Reg{R0_RETURN_VALUE});
        } else if (a.fetch) {
            dom.state.havoc_register_except_type(a.valreg);
        }
        return;
    }

    // Fetch the current value into the R11 pseudo-register.
    constexpr Reg r11{R11_ATOMIC_SCRATCH};
    (*this)(Mem{.access = a.access, .value = r11, .is_load = true});

    // Compute the new value in R11.
    (*this)(atomic_to_bin(a));

    if (a.op == Atomic::Op::CMPXCHG) {
        // For CMPXCHG, store the original value in r0.
        (*this)(Mem{.access = a.access, .value = Reg{R0_RETURN_VALUE}, .is_load = true});

        // For the destination, there are 3 possibilities:
        // 1) dst.value == r0.value : set R11 to valreg
        // 2) dst.value != r0.value : don't modify R11
        // 3) dst.value may or may not == r0.value : set R11 to the union of R11 and valreg
        // For now we just havoc the value of R11.
        dom.state.havoc_register_except_type(r11);
    } else if (a.fetch) {
        // For other FETCH operations, store the original value in the src register.
        (*this)(Mem{.access = a.access, .value = a.valreg, .is_load = true});
    }

    // Store the new value back in the original shared memory location.
    // Note that do_mem_store() currently doesn't track shared memory values,
    // but stack memory values are tracked and are legal here.
    (*this)(Mem{.access = a.access, .value = r11, .is_load = false});

    // Clear the R11 pseudo-register.
    dom.state.havoc_register(r11);
}
795

796
// Transformer for a helper-function call: applies the side effects of each
// argument (havocking memory regions the helper may write), then models the
// return value in r0, and finally scratches the caller-saved registers.
void EbpfTransformer::operator()(const Call& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    // Remember the map-fd argument register (if any) so the map-lookup return
    // value can be given a precise region below.
    std::optional<Reg> maybe_fd_reg{};
    for (ArgSingle param : call.singles) {
        switch (param.kind) {
        case ArgSingle::Kind::MAP_FD: maybe_fd_reg = param.reg; break;
        case ArgSingle::Kind::ANYTHING:
        case ArgSingle::Kind::MAP_FD_PROGRAMS:
        case ArgSingle::Kind::PTR_TO_MAP_KEY:
        case ArgSingle::Kind::PTR_TO_MAP_VALUE:
        case ArgSingle::Kind::PTR_TO_FUNC:
        case ArgSingle::Kind::PTR_TO_CTX:
        case ArgSingle::Kind::PTR_TO_SOCKET:
        case ArgSingle::Kind::PTR_TO_BTF_ID:
        case ArgSingle::Kind::PTR_TO_ALLOC_MEM:
        case ArgSingle::Kind::PTR_TO_SPIN_LOCK:
        case ArgSingle::Kind::PTR_TO_TIMER:
        case ArgSingle::Kind::CONST_SIZE_OR_ZERO:
            // Do nothing. We don't track the content of relevant memory regions.
            break;
        case ArgSingle::Kind::PTR_TO_STACK:
            // Do nothing; the stack is passed as context, not to be modified.
            break;
        case ArgSingle::Kind::PTR_TO_WRITABLE_LONG:
        case ArgSingle::Kind::PTR_TO_WRITABLE_INT: {
            // Fixed-width writable pointer: the helper may store a number at the pointed-to location.
            const int width = param.kind == ArgSingle::Kind::PTR_TO_WRITABLE_LONG ? 8 : 4;
            Interval w{width};
            dom.state = dom.state.join_over_types(param.reg, [&](TypeToNumDomain& state, const TypeEncoding type) {
                // Branch over possible pointer *types* for this register. This is separate from
                // uncertainty over regions/offsets within one type (e.g., many shared regions).
                if (type == T_STACK) {
                    const auto offset = get_type_offset_variable(param.reg, type);
                    if (!offset.has_value()) {
                        return;
                    }
                    const Interval addr = state.values.eval_interval(*offset);
                    // The helper may overwrite these bytes: forget everything known,
                    // then mark the written cells as numeric.
                    for (const DataKind kind : iterate_kinds()) {
                        stack.havoc(state.values, kind, addr, w);
                    }
                    // Keep this scoped to stack-typed pointers only.
                    stack.store_numbers(addr, w);
                }
            });
            break;
        }
        }
    }
    for (ArgPair param : call.pairs) {
        switch (param.kind) {
        case ArgPair::Kind::PTR_TO_READABLE_MEM:
            // Do nothing. No side effect allowed.
            break;

        case ArgPair::Kind::PTR_TO_WRITABLE_MEM: {
            bool store_numbers = true;
            auto variable = dom.state.get_type_offset_variable(param.mem);
            if (!variable.has_value()) {
                // checked by the checker
                break;
            }
            Interval addr = dom.state.values.eval_interval(variable.value());
            Interval width = dom.state.values.eval_interval(reg_pack(param.size).svalue);

            dom.state = dom.state.join_over_types(param.mem, [&](TypeToNumDomain& state, const TypeEncoding type) {
                if (type == T_STACK) {
                    // Pointer to a memory region that the called function may change,
                    // so we must havoc.
                    for (const DataKind kind : iterate_kinds()) {
                        stack.havoc(state.values, kind, addr, width);
                    }
                } else {
                    // Only mark stack bytes numeric below if the pointer is
                    // definitely stack-typed.
                    store_numbers = false;
                }
            });
            if (store_numbers) {
                // Functions are not allowed to write sensitive data,
                // and initialization is guaranteed
                stack.store_numbers(addr, width);
            }
        }
        }
    }

    constexpr Reg r0_reg{R0_RETURN_VALUE};
    const auto r0_pack = reg_pack(r0_reg);
    dom.state.values.havoc(r0_pack.stack_numeric_size);
    if (call.is_map_lookup) {
        // This is the only way to get a null pointer
        if (maybe_fd_reg) {
            if (const auto map_type = dom.get_map_type(*maybe_fd_reg)) {
                if (thread_local_program_info->platform->get_map_type(*map_type).value_type == EbpfMapValueType::MAP) {
                    // Map-in-map lookup: the result is itself a (nullable) map fd.
                    if (const auto inner_map_fd = dom.get_map_inner_map_fd(*maybe_fd_reg)) {
                        do_load_mapfd(r0_reg, to_signed(*inner_map_fd), true);
                        goto out;
                    }
                } else {
                    // Data map lookup with a known fd: the region size is known precisely.
                    assign_valid_ptr(r0_reg, true);
                    dom.state.values.assign(r0_pack.shared_offset, 0);
                    dom.state.values.set(r0_pack.shared_region_size, dom.get_map_value_size(*maybe_fd_reg));
                    dom.state.assign_type(r0_reg, T_SHARED);
                }
            }
        }
        // Generic map-lookup result: a nullable shared pointer at offset 0.
        // NOTE(review): when the branch above already assigned a shared pointer with a
        // known region size, this re-applies the same offset/type constraints (and
        // leaves the region size intact); only the inner-map case skips it via `goto out`.
        assign_valid_ptr(r0_reg, true);
        dom.state.values.assign(r0_pack.shared_offset, 0);
        dom.state.assign_type(r0_reg, T_SHARED);
    } else if (call.return_ptr_type.has_value()) {
        // Helper declared to return a pointer of a specific type.
        assign_valid_ptr(r0_reg, call.return_nullable);
        dom.state.assign_type(r0_reg, *call.return_ptr_type);
        dom.state.havoc_offsets(r0_reg);
    } else {
        // Helper returns a plain number.
        dom.state.havoc_register_except_type(r0_reg);
        dom.state.assign_type(r0_reg, T_NUM);
        // dom.state.values.add_constraint(r0_pack.value < 0); for INTEGER_OR_NO_RETURN_IF_SUCCEED.
    }
out:
    // Regardless of the return-value shape, calls clobber caller-saved registers.
    scratch_caller_saved_registers();
    if (call.reallocate_packet) {
        forget_packet_pointers();
    }
}
921

922
// Transformer for a local (subprogram) call: saves the callee-saved registers
// under the callee's stack-frame prefix and moves r10 down by one subprogram
// stack frame.
void EbpfTransformer::operator()(const CallLocal& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    save_callee_saved_registers(call.stack_frame_prefix);

    // Update r10: allocate the callee's stack frame.
    add(Reg{R10_STACK_POINTER}, -EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
933

934
// Transformer for an indirect call (callx): when the function register is
// proven to hold a single helper id that fits in 32 bits and the platform
// accepts that helper, process it exactly like a direct Call with that id.
void EbpfTransformer::operator()(const Callx& callx) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }

    // Look up the helper function id held by the register.
    const RegPack& func_pack = reg_pack(callx.func);
    const auto helper_interval = dom.state.values.eval_interval(func_pack.svalue);
    const auto singleton = helper_interval.singleton();
    if (!singleton || !singleton->fits<int32_t>()) {
        // Unknown or out-of-range target: nothing to model.
        return;
    }
    // We can now process it as if the id was immediate.
    const int32_t imm = singleton->cast_to<int32_t>();
    if (!thread_local_program_info->platform->is_helper_usable(imm)) {
        return;
    }
    (*this)(make_call(imm, *thread_local_program_info->platform));
}
955

956
void EbpfTransformer::do_load_mapfd(const Reg& dst_reg, const int mapfd, const bool maybe_null) {
6,476✔
957
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
6,476✔
958
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
6,476✔
959
    const RegPack& dst = reg_pack(dst_reg);
6,476✔
960
    if (type.value_type == EbpfMapValueType::PROGRAM) {
6,476✔
961
        dom.state.assign_type(dst_reg, T_MAP_PROGRAMS);
714✔
962
        dom.state.values.assign(dst.map_fd_programs, mapfd);
1,071✔
963
    } else {
964
        dom.state.assign_type(dst_reg, T_MAP);
5,762✔
965
        dom.state.values.assign(dst.map_fd, mapfd);
8,643✔
966
    }
967
    assign_valid_ptr(dst_reg, maybe_null);
6,476✔
968
}
6,476✔
969

970
// Transformer for LoadMapFd: a map fd loaded by instruction is never null.
void EbpfTransformer::operator()(const LoadMapFd& ins) {
    if (!dom.is_bottom()) {
        do_load_mapfd(ins.dst, ins.mapfd, false);
    }
}
976

977
void EbpfTransformer::do_load_map_address(const Reg& dst_reg, const int mapfd, const int32_t offset) {
290✔
978
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
290✔
979
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
290✔
980

981
    if (type.value_type == EbpfMapValueType::PROGRAM) {
290✔
UNCOV
982
        throw std::invalid_argument("Cannot load address of program map type - only data maps are supported");
×
983
    }
984

985
    // Set the shared region size and offset for the map.
986
    dom.state.assign_type(dst_reg, T_SHARED);
290✔
987
    const RegPack& dst = reg_pack(dst_reg);
290✔
988
    dom.state.values.assign(dst.shared_offset, offset);
290✔
989
    dom.state.values.assign(dst.shared_region_size, desc.value_size);
290✔
990
    assign_valid_ptr(dst_reg, false);
290✔
991
}
290✔
992

993
// Transformer for LoadMapAddress: delegates to do_load_map_address.
void EbpfTransformer::operator()(const LoadMapAddress& ins) {
    if (!dom.is_bottom()) {
        do_load_map_address(ins.dst, ins.mapfd, ins.offset);
    }
}
999

1000
void EbpfTransformer::assign_valid_ptr(const Reg& dst_reg, const bool maybe_null) {
14,206✔
1001
    using namespace dsl_syntax;
7,103✔
1002
    const RegPack& reg = reg_pack(dst_reg);
14,206✔
1003
    dom.state.values.havoc(reg.svalue);
14,206✔
1004
    dom.state.values.havoc(reg.uvalue);
14,206✔
1005
    if (maybe_null) {
14,206✔
1006
        dom.state.values.add_constraint(0 <= reg.svalue);
11,208✔
1007
    } else {
1008
        dom.state.values.add_constraint(0 < reg.svalue);
10,101✔
1009
    }
1010
    dom.state.values.add_constraint(reg.svalue <= PTR_MAX);
21,309✔
1011
    dom.state.values.assign(reg.uvalue, reg.svalue);
14,206✔
1012
}
14,206✔
1013

1014
// If nothing is known of the stack_numeric_size,
// try to recompute the stack_numeric_size.
// `type_variable` identifies the register/cell whose stack_offset anchors the
// numeric prefix; only registers that may be stack-typed are updated.
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& state, const ArrayDomain& stack,
                                                   const Variable type_variable) {
    const Variable stack_numeric_size_variable =
        variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable);

    if (state.may_have_type(type_variable, T_STACK)) {
        // Length of the contiguous all-numeric run starting at the stack offset.
        const int numeric_size =
            stack.min_all_num_size(state.values, variable_registry->kind_var(DataKind::stack_offsets, type_variable));
        if (numeric_size > 0) {
            state.values.assign(stack_numeric_size_variable, numeric_size);
        }
    }
}
1029

1030
// Convenience overload: recompute the numeric-prefix size for a register,
// via its type variable.
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& state, ArrayDomain& stack, const Reg& reg) {
    recompute_stack_numeric_size(state, stack, reg_type(reg));
}
1033

1034
// Add the immediate `imm` to `dst_reg` with `finite_width`-bit overflow
// semantics, keeping the register's type-specific offset and its
// stack_numeric_size bookkeeping consistent when an offset variable exists.
void EbpfTransformer::add(const Reg& dst_reg, const int imm, const int finite_width) {
    const auto dst = reg_pack(dst_reg);
    dom.state.values->add_overflow(dst.svalue, dst.uvalue, imm, finite_width);
    if (const auto offset = dom.state.get_type_offset_variable(dst_reg)) {
        dom.state.values->add(*offset, imm);
        if (imm > 0) {
            // Since the start offset is increasing but
            // the end offset is not, the numeric size decreases.
            dom.state.values->sub(dst.stack_numeric_size, imm);
        } else if (imm < 0) {
            // Moving the offset down may expose bytes of unknown numericity.
            dom.state.values.havoc(dst.stack_numeric_size);
        }
        recompute_stack_numeric_size(dom.state, stack, dst_reg);
    }
}
1049

1050
void EbpfTransformer::shl(const Reg& dst_reg, int imm, const int finite_width) {
26,874✔
1051
    dom.state.havoc_offsets(dst_reg);
26,874✔
1052

1053
    // The BPF ISA requires masking the imm.
1054
    imm &= finite_width - 1;
26,874✔
1055
    const RegPack dst = reg_pack(dst_reg);
26,874✔
1056
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
26,874✔
1057
        dom.state.values->shl(dst.svalue, dst.uvalue, imm, finite_width);
26,874✔
1058
    } else {
UNCOV
1059
        dom.state.values.havoc(dst.svalue);
×
UNCOV
1060
        dom.state.values.havoc(dst.uvalue);
×
1061
    }
1062
}
26,874✔
1063

1064
void EbpfTransformer::lshr(const Reg& dst_reg, int imm, int finite_width) {
17,822✔
1065
    dom.state.havoc_offsets(dst_reg);
17,822✔
1066

1067
    // The BPF ISA requires masking the imm.
1068
    imm &= finite_width - 1;
17,822✔
1069
    const RegPack dst = reg_pack(dst_reg);
17,822✔
1070
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
17,822✔
1071
        dom.state.values->lshr(dst.svalue, dst.uvalue, imm, finite_width);
17,822✔
1072
    } else {
UNCOV
1073
        dom.state.values.havoc(dst.svalue);
×
UNCOV
1074
        dom.state.values.havoc(dst.uvalue);
×
1075
    }
1076
}
17,822✔
1077

1078
void EbpfTransformer::ashr(const Reg& dst_reg, const LinearExpression& right_svalue, const int finite_width) {
942✔
1079
    dom.state.havoc_offsets(dst_reg);
942✔
1080

1081
    const RegPack dst = reg_pack(dst_reg);
942✔
1082
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
942✔
1083
        dom.state.values->ashr(dst.svalue, dst.uvalue, right_svalue, finite_width);
942✔
1084
    } else {
UNCOV
1085
        dom.state.values.havoc(dst.svalue);
×
UNCOV
1086
        dom.state.values.havoc(dst.uvalue);
×
1087
    }
1088
}
942✔
1089

1090
// Width, in bits, of the sign-extended source for a MOVSX variant.
// Throws on any non-MOVSX opcode.
static int _movsx_bits(const Bin::Op op) {
    if (op == Bin::Op::MOVSX8) {
        return 8;
    }
    if (op == Bin::Op::MOVSX16) {
        return 16;
    }
    if (op == Bin::Op::MOVSX32) {
        return 32;
    }
    throw std::exception();
}
1098

1099
// Abstract transformer for a BPF binary ALU instruction: dst = dst op v,
// where v is either an immediate (Imm) or a source register (Reg).
// Updates the signed/unsigned value domain (svalue/uvalue), the type domain,
// and — for pointer arithmetic (ADD/SUB) — the per-type offset variables.
// 32-bit (!is64) operations zero-extend the destination at the end.
void EbpfTransformer::operator()(const Bin& bin) {
    // Nothing to do on an unreachable (bottom) state.
    if (dom.is_bottom()) {
        return;
    }
    using namespace dsl_syntax;

    auto dst = reg_pack(bin.dst);
    int finite_width = bin.is64 ? 64 : 32;

    // TODO: Unusable states and values should be better handled.
    //       Probably by propagating an error state.
    // Reading an uninitialized dst is meaningless for every op except the
    // plain/sign-extending moves, which overwrite dst without reading it.
    if (!dom.state.is_initialized(bin.dst) &&
        !std::set{Bin::Op::MOV, Bin::Op::MOVSX8, Bin::Op::MOVSX16, Bin::Op::MOVSX32}.contains(bin.op)) {
        dom.state.havoc_register(bin.dst);
        return;
    }
    if (auto pimm = std::get_if<Imm>(&bin.v)) {
        // dst += K
        int64_t imm;
        if (bin.is64) {
            // Use the full signed value.
            imm = to_signed(pimm->v);
        } else {
            // Use only the low 32 bits of the value.
            imm = gsl::narrow_cast<int32_t>(pimm->v);
            // If this is a 32-bit operation and the destination is not proven a number, forget the register.
            if (dom.state.is_in_group(bin.dst, TS_NUM)) {
                // Safe to zero-extend the low 32 bits; even if it's also bin.src, only the 32-bit value is used.
                dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
            } else {
                dom.state.havoc_register(bin.dst);
            }
        }
        switch (bin.op) {
        case Bin::Op::MOV:
            // dst becomes the constant: a number with no pointer offsets.
            dom.state.values.assign(dst.svalue, imm);
            dom.state.values.assign(dst.uvalue, imm);
            dom.state.values->overflow_bounds(dst.uvalue, bin.is64 ? 64 : 32, false);
            dom.state.assign_type(bin.dst, T_NUM);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::MOVSX8:
        case Bin::Op::MOVSX16:
        case Bin::Op::MOVSX32: CRAB_ERROR("Unsupported operation");
        case Bin::Op::ADD:
            // Adding 0 is a no-op; returning early preserves relational facts.
            if (imm == 0) {
                return;
            }
            add(bin.dst, gsl::narrow<int>(imm), finite_width);
            break;
        case Bin::Op::SUB:
            if (imm == 0) {
                return;
            }
            // Subtraction of a constant is addition of its negation.
            add(bin.dst, gsl::narrow<int>(-imm), finite_width);
            break;
        case Bin::Op::MUL:
            dom.state.values->mul(dst.svalue, dst.uvalue, imm, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::UDIV:
            dom.state.values->udiv(dst.svalue, dst.uvalue, imm, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::UMOD:
            dom.state.values->urem(dst.svalue, dst.uvalue, imm, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::SDIV:
            dom.state.values->sdiv(dst.svalue, dst.uvalue, imm, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::SMOD:
            dom.state.values->srem(dst.svalue, dst.uvalue, imm, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::OR:
            dom.state.values->bitwise_or(dst.svalue, dst.uvalue, imm);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::AND:
            // FIX: what to do with ptr&-8 as in counter/simple_loop_unrolled?
            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, imm);
            // For a positive mask, AND also bounds the result to [0, imm].
            if (gsl::narrow<int32_t>(imm) > 0) {
                // AND with immediate is only a 32-bit operation so svalue and uvalue are the same.
                dom.state.values.add_constraint(dst.svalue <= imm);
                dom.state.values.add_constraint(dst.uvalue <= imm);
                dom.state.values.add_constraint(0 <= dst.svalue);
                dom.state.values.add_constraint(0 <= dst.uvalue);
            }
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::LSH: shl(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
        case Bin::Op::RSH: lshr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
        // NOTE(review): unlike shl/lshr, ashr takes a LinearExpression and does
        // not mask the shift amount internally — presumably handled by the
        // domain's ashr; confirm against Interval/NumAbsDomain semantics.
        case Bin::Op::ARSH: ashr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
        case Bin::Op::XOR:
            dom.state.values->bitwise_xor(dst.svalue, dst.uvalue, imm);
            dom.state.havoc_offsets(bin.dst);
            break;
        }
    } else {
        // dst op= src
        auto src_reg = std::get<Reg>(bin.v);
        auto src = reg_pack(src_reg);
        // Cannot compute with an uninitialized source; the destination is lost.
        if (!dom.state.is_initialized(src_reg)) {
            dom.state.havoc_register(bin.dst);
            return;
        }
        switch (bin.op) {
        case Bin::Op::ADD: {
            if (dom.state.same_type(bin.dst, std::get<Reg>(bin.v))) {
                // both must have been checked to be numbers
                dom.state.values->add_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
            } else {
                // Here we're not sure that lhs and rhs are the same type; they might be.
                // But previous assertions should fail unless we know that exactly one of lhs or rhs is a pointer.
                // Enumerate the (dst_type, src_type) combinations and join the results.
                dom.state =
                    dom.state.join_over_types(bin.dst, [&](TypeToNumDomain& state, const TypeEncoding dst_type) {
                        state =
                            state.join_over_types(src_reg, [&](TypeToNumDomain& state, const TypeEncoding src_type) {
                                if (dst_type == T_NUM && src_type != T_NUM) {
                                    // num += ptr
                                    state.assign_type(bin.dst, src_type);
                                    if (const auto dst_offset = get_type_offset_variable(bin.dst, src_type)) {
                                        state.values->apply(ArithBinOp::ADD, dst_offset.value(), dst.svalue,
                                                            get_type_offset_variable(src_reg, src_type).value());
                                    }
                                    if (src_type == T_SHARED) {
                                        state.values.assign(dst.shared_region_size, src.shared_region_size);
                                    }
                                } else if (dst_type != T_NUM && src_type == T_NUM) {
                                    // ptr += num
                                    state.assign_type(bin.dst, dst_type);
                                    if (const auto dst_offset = get_type_offset_variable(bin.dst, dst_type)) {
                                        state.values->apply(ArithBinOp::ADD, dst_offset.value(), dst_offset.value(),
                                                            src.svalue);
                                        if (dst_type == T_STACK) {
                                            // Reduce the numeric size.
                                            using namespace dsl_syntax;
                                            if (state.values.intersect(src.svalue < 0)) {
                                                // Moving the stack pointer down may expose
                                                // unwritten bytes; recompute conservatively.
                                                state.values.havoc(dst.stack_numeric_size);
                                                recompute_stack_numeric_size(state, stack, bin.dst);
                                            } else {
                                                state.values->apply_signed(ArithBinOp::SUB, dst.stack_numeric_size,
                                                                           dst.stack_numeric_size,
                                                                           dst.stack_numeric_size, src.svalue, 0);
                                            }
                                        }
                                    }
                                } else if (dst_type == T_NUM && src_type == T_NUM) {
                                    // dst and src don't necessarily have the same type, but among the possibilities
                                    // enumerated is the case where they are both numbers.
                                    state.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue,
                                                               src.svalue, finite_width);
                                } else {
                                    // We ignore the cases here that do not match the assumption described
                                    // above.  Joining bottom with another result will leave the other
                                    // results unchanged.
                                    state.values.set_to_bottom();
                                }
                            });
                    });
                // careful: change dst.value only after dealing with offset
                dom.state.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
                                               finite_width);
            }
            break;
        }
        case Bin::Op::SUB: {
            if (dom.state.same_type(bin.dst, std::get<Reg>(bin.v))) {
                // src and dest have the same type.
                TypeDomain tmp_m_type_inv = dom.state.types;
                dom.state = dom.state.join_over_types(bin.dst, [&](TypeToNumDomain& state, const TypeEncoding type) {
                    switch (type) {
                    case T_NUM:
                        // This is: sub_overflow(inv, dst.value, src.value, finite_width);
                        state.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
                                                   finite_width);
                        state.assign_type(bin.dst, T_NUM);
                        state.havoc_offsets(bin.dst);
                        break;
                    default:
                        // ptr -= ptr
                        // Assertions should make sure we only perform this on non-shared pointers.
                        // The difference of two same-region pointers is a number (offset delta).
                        if (const auto dst_offset = get_type_offset_variable(bin.dst, type)) {
                            state.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst_offset.value(),
                                                       get_type_offset_variable(src_reg, type).value(), finite_width);
                            state.values.havoc(dst_offset.value());
                        }
                        state.havoc_offsets(bin.dst);
                        state.assign_type(bin.dst, T_NUM);
                        break;
                    }
                });
            } else {
                // We're not sure that lhs and rhs are the same type.
                // Either they're different, or at least one is not a singleton.
                if (dom.state.is_in_group(std::get<Reg>(bin.v), TS_NUM)) {
                    // ptr (or num) -= num: shift both the value and the offset.
                    dom.state.values->sub_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
                    if (auto dst_offset = dom.state.get_type_offset_variable(bin.dst)) {
                        dom.state.values->sub(dst_offset.value(), src.svalue);
                        if (dom.state.may_have_type(bin.dst, T_STACK)) {
                            // Reduce the numeric size.
                            using namespace dsl_syntax;
                            if (dom.state.values.intersect(src.svalue > 0)) {
                                dom.state.values.havoc(dst.stack_numeric_size);
                                recompute_stack_numeric_size(dom.state, stack, bin.dst);
                            } else {
                                dom.state.values->apply(ArithBinOp::ADD, dst.stack_numeric_size, dst.stack_numeric_size,
                                                        src.svalue);
                            }
                        }
                    }
                } else {
                    dom.state.havoc_register(bin.dst);
                }
            }
            break;
        }
        case Bin::Op::MUL:
            dom.state.values->mul(dst.svalue, dst.uvalue, src.svalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::UDIV:
            // Unsigned ops consume the source's unsigned view (uvalue).
            dom.state.values->udiv(dst.svalue, dst.uvalue, src.uvalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::UMOD:
            dom.state.values->urem(dst.svalue, dst.uvalue, src.uvalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::SDIV:
            dom.state.values->sdiv(dst.svalue, dst.uvalue, src.svalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::SMOD:
            dom.state.values->srem(dst.svalue, dst.uvalue, src.svalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::OR:
            dom.state.values->bitwise_or(dst.svalue, dst.uvalue, src.uvalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::AND:
            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, src.uvalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::LSH:
            // If the shift amount is a known numeric constant, reduce to the
            // more precise immediate-shift transformer.
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
                auto src_interval = dom.state.values.eval_interval(src.uvalue);
                if (std::optional<Number> sn = src_interval.singleton()) {
                    // truncate to uint64?
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
                    if (imm <= std::numeric_limits<int32_t>::max()) {
                        if (!bin.is64) {
                            // Use only the low 32 bits of the value.
                            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
                        }
                        shl(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
                        break;
                    }
                }
            }
            // Non-constant shift amount: fall back to the coarse overflow rule.
            dom.state.values->shl_overflow(dst.svalue, dst.uvalue, src.uvalue);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::RSH:
            // Same constant-amount specialization as LSH above.
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
                auto src_interval = dom.state.values.eval_interval(src.uvalue);
                if (std::optional<Number> sn = src_interval.singleton()) {
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
                    if (imm <= std::numeric_limits<int32_t>::max()) {
                        if (!bin.is64) {
                            // Use only the low 32 bits of the value.
                            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
                        }
                        lshr(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
                        break;
                    }
                }
            }
            dom.state.havoc_register_except_type(bin.dst);
            break;
        case Bin::Op::ARSH:
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
                ashr(bin.dst, src.svalue, finite_width);
                break;
            }
            dom.state.havoc_register_except_type(bin.dst);
            break;
        case Bin::Op::XOR:
            dom.state.values->bitwise_xor(dst.svalue, dst.uvalue, src.uvalue, finite_width);
            dom.state.havoc_offsets(bin.dst);
            break;
        case Bin::Op::MOVSX8:
        case Bin::Op::MOVSX16:
        case Bin::Op::MOVSX32: {
            const int source_width = _movsx_bits(bin.op);
            // Keep relational information if operation is a no-op.
            // (dst == src and the value already fits in the source width.)
            if (dst.svalue == src.svalue &&
                dom.state.values.eval_interval(dst.svalue) <= Interval::signed_int(source_width)) {
                return;
            }
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
                dom.state.havoc_offsets(bin.dst);
                dom.state.assign_type(bin.dst, T_NUM);
                dom.state.values->sign_extend(dst.svalue, dst.uvalue, src.svalue, finite_width, source_width);
                break;
            }
            // Sign-extending a non-number (pointer) is not representable.
            dom.state.havoc_register(bin.dst);
            break;
        }
        case Bin::Op::MOV:
            // Keep relational information if operation is a no-op.
            if (bin.is64 || dom.state.is_in_group(src_reg, TS_NUM)) {
                if (bin.dst != src_reg) {
                    // the 32bit case is handled below
                    dom.state.assign(bin.dst, src_reg);
                }
            } else {
                // If src is not a number, we don't know how to truncate a pointer.
                dom.state.havoc_register(bin.dst);
            }
            break;
        }
    }
    // 32-bit ALU operations zero the upper 32 bits of the destination.
    if (!bin.is64) {
        dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
    }
}
1429

1430
void EbpfTransformer::initialize_loop_counter(const Label& label) {
40✔
1431
    dom.state.values.assign(variable_registry->loop_counter(to_string(label)), 0);
60✔
1432
}
40✔
1433

1434
// Abstract transformer for IncrementLoopCounter: bump the counter variable
// associated with the named loop by one.
void EbpfTransformer::operator()(const IncrementLoopCounter& ins) {
    if (dom.is_bottom()) {
        // Unreachable state: nothing to update.
        return;
    }
    dom.state.values->add(variable_registry->loop_counter(to_string(ins.name)), 1);
}
1441

1442
// Free-function entry point: zero-initialize the loop counter for `label`
// inside `dom` via a temporary transformer.
void ebpf_domain_initialize_loop_counter(EbpfDomain& dom, const Label& label) {
    EbpfTransformer transformer{dom};
    transformer.initialize_loop_counter(label);
}
1445
} // namespace prevail
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc