• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

Alan-Jowett / ebpf-verifier / 21993000823

13 Feb 2026 01:02PM UTC coverage: 86.313% (-0.5%) from 86.783%
21993000823

push

github

web-flow
ISA feature support matrix and precise rejection semantics (#999)

* ISA feature support matrix and precise rejection semantics

Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

282 of 380 new or added lines in 14 files covered. (74.21%)

3 existing lines in 3 files now uncovered.

9535 of 11047 relevant lines covered (86.31%)

3060772.25 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.36
/src/crab/ebpf_transformer.cpp
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3

4
// This file is eBPF-specific, not derived from CRAB.
5

6
#include <bitset>
7
#include <cassert>
8
#include <optional>
9
#include <utility>
10
#include <vector>
11

12
#include "boost/endian/conversion.hpp"
13

14
#include "arith/dsl_syntax.hpp"
15
#include "config.hpp"
16
#include "crab/array_domain.hpp"
17
#include "crab/ebpf_domain.hpp"
18
#include "crab/var_registry.hpp"
19
#include "crab_utils/num_safety.hpp"
20
#include "ir/unmarshal.hpp"
21
#include "platform.hpp"
22
#include "string_constraints.hpp"
23

24
namespace prevail {
25
/// Abstract transformer for eBPF instructions: a visitor over the Instruction
/// variant that mutates the wrapped EbpfDomain in place.
class EbpfTransformer final {
    EbpfDomain& dom;
    // shorthands:
    ArrayDomain& stack;

  public:
    explicit EbpfTransformer(EbpfDomain& _dom) : dom(_dom), stack(_dom.stack) {}

    // abstract transformers
    void operator()(const Assume&);

    void operator()(const Atomic&);

    void operator()(const Bin&);

    void operator()(const Call&);

    void operator()(const CallLocal&);

    void operator()(const Callx&);
    void operator()(const CallBtf&);

    void operator()(const Exit&);

    void operator()(const IncrementLoopCounter&);

    void operator()(const Jmp&) const;

    void operator()(const LoadMapFd&);

    void operator()(const LoadMapAddress&);
    void operator()(const LoadPseudo&);

    void operator()(const Mem&);

    void operator()(const Packet&);

    void operator()(const Un&);

    void operator()(const Undefined&);

    void initialize_loop_counter(const Label& label);

  private:
    /// Havoc registers r1 through r5 (see the implementation: it forgets all
    /// state for each of R1_ARG..R5_ARG).
    void scratch_caller_saved_registers();

    void save_callee_saved_registers(const std::string& prefix);

    void restore_callee_saved_registers(const std::string& prefix);

    void havoc_subprogram_stack(const std::string& prefix);

    void forget_packet_pointers();

    void do_load_mapfd(const Reg& dst_reg, int mapfd, bool maybe_null);

    void do_load_map_address(const Reg& dst_reg, int mapfd, int32_t offset);

    void assign_valid_ptr(const Reg& dst_reg, bool maybe_null);

    static void recompute_stack_numeric_size(TypeToNumDomain& rcp, ArrayDomain& stack, const Reg& reg);

    static void recompute_stack_numeric_size(TypeToNumDomain& rcp, ArrayDomain& stack, Variable type_variable);

    static void do_load_stack(TypeToNumDomain& rcp, ArrayDomain& stack, const Reg& target_reg,
                              const LinearExpression& addr, int width, const Reg& src_reg);

    void do_load(const Mem& b, const Reg& target_reg);

    static void do_store_stack(TypeToNumDomain& rcp, ArrayDomain& stack, const LinearExpression& symb_addr,
                               int exact_width, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                               const std::optional<Reg>& opt_val_reg);

    void do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                      const std::optional<Reg>& opt_val_reg);

    void add(const Reg& dst_reg, int imm, int finite_width);

    void shl(const Reg& dst_reg, int imm, int finite_width);

    void lshr(const Reg& dst_reg, int imm, int finite_width);

    void ashr(const Reg& dst_reg, const LinearExpression& right_svalue, int finite_width);
}; // end EbpfTransformer
110

111
/// Apply the abstract transformer for one instruction to `inv` in place.
///
/// A bottom pre-state is left untouched. If the transformer produces bottom
/// from a non-bottom pre-state — except for Assume, where an infeasible
/// branch condition legitimately yields bottom — that indicates a bug in a
/// transformer, so the analysis is aborted with a descriptive exception.
void ebpf_domain_transform(EbpfDomain& inv, const Instruction& ins) {
    if (inv.is_bottom()) {
        return;
    }
    // Copy of the pre-state kept only for the diagnostic message below.
    const auto pre = inv;
    std::visit(EbpfTransformer{inv}, ins);
    if (inv.is_bottom() && !std::holds_alternative<Assume>(ins)) {
        // Fail. raise an exception to stop the analysis.
        std::stringstream msg;
        msg << "Bug! pre-invariant:\n"
            << pre << "\n followed by instruction: " << ins << "\n"
            << "leads to bottom";
        throw std::logic_error(msg.str());
    }
}
126

127
void EbpfTransformer::scratch_caller_saved_registers() {
10,636✔
128
    for (uint8_t i = R1_ARG; i <= R5_ARG; i++) {
63,816✔
129
        dom.rcp.havoc_register(Reg{i});
53,180✔
130
    }
131
}
10,636✔
132

133
/// Create variables specific to the new call stack frame that store
/// copies of the states of r6 through r9.
void EbpfTransformer::save_callee_saved_registers(const std::string& prefix) {
    // TODO: define location `stack_frame_var`, and pass to dom.rcp.assign().
    //       Similarly in restore_callee_saved_registers
    for (uint8_t r = R6; r <= R9; r++) {
        // Only registers with an initialized type carry state worth saving.
        if (dom.rcp.types.is_initialized(Reg{r})) {
            const Variable type_var = variable_registry->type_reg(r);
            // Save the register's type into the frame-specific type variable.
            dom.rcp.assign_type(variable_registry->stack_frame_var(DataKind::types, r, prefix), type_var);
            for (const TypeEncoding type : dom.rcp.types.iterate_types(Reg{r})) {
                // Besides the kinds implied by the type, always save the
                // signed and unsigned value approximations.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->reg(kind, r);
                    const Variable dst_var = variable_registry->stack_frame_var(kind, r, prefix);
                    // A top interval carries no information; skip the copy.
                    if (!dom.rcp.values.eval_interval(src_var).is_top()) {
                        dom.rcp.values.assign(dst_var, src_var);
                    }
                }
            }
        }
    }
}
157

158
/// Restore r6 through r9 from the frame-specific copies created by
/// save_callee_saved_registers(), then discard those copies.
void EbpfTransformer::restore_callee_saved_registers(const std::string& prefix) {
    for (uint8_t r = R6; r <= R9; r++) {
        Reg reg{r};
        const Variable type_var = variable_registry->stack_frame_var(DataKind::types, r, prefix);
        if (dom.rcp.types.is_initialized(type_var)) {
            dom.rcp.assign_type(reg, type_var);
            for (const TypeEncoding type : dom.rcp.types.iterate_types(reg)) {
                // Mirrors save_callee_saved_registers: the kinds implied by the
                // type, plus the signed and unsigned value approximations.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->stack_frame_var(kind, r, prefix);
                    const Variable dst_var = variable_registry->reg(kind, r);
                    if (!dom.rcp.values.eval_interval(src_var).is_top()) {
                        dom.rcp.values.assign(dst_var, src_var);
                    } else {
                        // Nothing was saved for this kind: the value is unknown.
                        dom.rcp.values.havoc(dst_var);
                    }
                    // The frame-local copy is dead once restored.
                    dom.rcp.values.havoc(src_var);
                }
            }
        }
        // Always clear the frame-local type variable, even if uninitialized.
        dom.rcp.types.havoc_type(type_var);
    }
}
183

184
/// Forget everything stored in the stack slice that belonged to the exiting
/// subprogram's frame: the EBPF_SUBPROGRAM_STACK_SIZE bytes below r10.
/// NOTE(review): `prefix` is currently unused — the frame is located purely
/// via r10's stack offset; confirm whether the parameter is intentional.
void EbpfTransformer::havoc_subprogram_stack(const std::string& prefix) {
    const Variable r10_stack_offset = reg_pack(R10_STACK_POINTER).stack_offset;
    const auto intv = dom.rcp.values.eval_interval(r10_stack_offset);
    if (!intv.is_singleton()) {
        // Without a concrete r10 we cannot identify the frame's byte range.
        return;
    }
    const int64_t stack_start = intv.singleton()->cast_to<int64_t>() - EBPF_SUBPROGRAM_STACK_SIZE;
    // Havoc both the type bytes and every value kind over the frame's range.
    stack.havoc_type(dom.rcp.types, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
    for (const DataKind kind : iterate_kinds()) {
        stack.havoc(dom.rcp.values, kind, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
    }
}
196

197
/// Forget all state at locations typed T_PACKET, then re-establish the base
/// packet invariants via EbpfDomain::initialize_packet().
void EbpfTransformer::forget_packet_pointers() {
    dom.rcp.havoc_all_locations_having_type(T_PACKET);
    dom.initialize_packet();
}
201

202
/** Linear constraint for a pointer comparison.
203
 */
204
static LinearConstraint assume_cst_offsets_reg(const Condition::Op op, const Variable dst_offset,
1,876✔
205
                                               const Variable src_offset) {
206
    using namespace dsl_syntax;
938✔
207
    using Op = Condition::Op;
938✔
208
    switch (op) {
1,876✔
209
    case Op::EQ: return eq(dst_offset, src_offset);
6✔
210
    case Op::NE: return neq(dst_offset, src_offset);
10✔
211
    case Op::GE: return dst_offset >= src_offset;
6✔
212
    case Op::SGE: return dst_offset >= src_offset; // pointer comparison is unsigned
×
213
    case Op::LE: return dst_offset <= src_offset;
926✔
214
    case Op::SLE: return dst_offset <= src_offset; // pointer comparison is unsigned
×
215
    case Op::GT: return dst_offset > src_offset;
1,389✔
216
    case Op::SGT: return dst_offset > src_offset; // pointer comparison is unsigned
×
217
    case Op::SLT: return src_offset > dst_offset;
×
218
    // Note: reverse the test as a workaround strange lookup:
219
    case Op::LT: return src_offset > dst_offset; // FIX unsigned
6✔
220
    default: return dst_offset - dst_offset == 0;
×
221
    }
222
}
223

224
/// Refine the abstract state with the branch condition of an Assume.
/// An infeasible condition legitimately drives the state to bottom.
void EbpfTransformer::operator()(const Assume& s) {
    if (dom.is_bottom()) {
        return;
    }
    const Condition cond = s.cond;
    const auto dst = reg_pack(cond.left);
    if (const auto psrc_reg = std::get_if<Reg>(&cond.right)) {
        // Register-register comparison.
        const auto src_reg = *psrc_reg;
        const auto src = reg_pack(src_reg);
        // This should have been checked by EbpfChecker
        assert(dom.rcp.types.same_type(cond.left, std::get<Reg>(cond.right)));
        dom.rcp = dom.rcp.join_over_types(cond.left, [&](TypeToNumDomain& rcp, const TypeEncoding type) {
            if (type == T_NUM) {
                // Numeric comparison: constrain both signed and unsigned views.
                for (const LinearConstraint& cst :
                     rcp.values->assume_cst_reg(cond.op, cond.is64, dst.svalue, dst.uvalue, src.svalue, src.uvalue)) {
                    rcp.values.add_constraint(cst);
                }
            } else {
                // Either pointers to a singleton region,
                // or an equality comparison on map descriptors/pointers to non-singleton locations
                if (const auto dst_offset = get_type_offset_variable(cond.left, type)) {
                    if (const auto src_offset = get_type_offset_variable(src_reg, type)) {
                        rcp.values.add_constraint(
                            assume_cst_offsets_reg(cond.op, dst_offset.value(), src_offset.value()));
                    }
                }
            }
        });
    } else {
        // Register-immediate comparison.
        const int64_t imm = gsl::narrow_cast<int64_t>(std::get<Imm>(cond.right).v);
        for (const LinearConstraint& cst :
             dom.rcp.values->assume_cst_imm(cond.op, cond.is64, dst.svalue, dst.uvalue, imm)) {
            dom.rcp.values.add_constraint(cst);
        }
    }
}
260

261
void EbpfTransformer::operator()(const Undefined& a) {}
2,522✔
262

263
// Rejected before abstract interpretation by cfg_builder::check_instruction_feature_support.
void EbpfTransformer::operator()(const CallBtf&) {
    // Unreachable by construction; the assert fires only in debug builds.
    assert(false && "CallBtf should be rejected before abstract transformation");
}
267

268
// Rejected before abstract interpretation by cfg_builder::check_instruction_feature_support.
void EbpfTransformer::operator()(const LoadPseudo&) {
    // Unreachable by construction; the assert fires only in debug builds.
    assert(false && "LoadPseudo should be rejected before abstract transformation");
}
272

273
// Identity transform with the same call shape as boost::endian::endian_reverse,
// usable with swap_endianness() when no byte reversal is needed; any narrowing
// to T happens at the call via the explicit template argument.
template <class T>
constexpr T truncate(const T value) noexcept {
    return value;
}
278

279
/// Transformer for unary ALU operations: byte swaps (BE/LE/SWAP) and NEG.
void EbpfTransformer::operator()(const Un& stmt) {
    if (dom.is_bottom()) {
        return;
    }
    const auto dst = reg_pack(stmt.dst);
    // Apply `be_or_le` concretely when the destination is a number with a
    // singleton value; otherwise forget the value and the register's offsets
    // (a byte-swapped pointer is no longer a meaningful pointer).
    auto swap_endianness = [&](const Variable v, auto be_or_le) {
        if (dom.rcp.types.type_is_number(stmt.dst)) {
            if (const auto n = dom.rcp.values.eval_interval(v).singleton()) {
                if (n->fits_cast_to<int64_t>()) {
                    dom.rcp.values.set(v, Interval{be_or_le(n->cast_to<int64_t>())});
                    return;
                }
            }
        }
        dom.rcp.values.havoc(v);
        dom.rcp.havoc_offsets(stmt.dst);
    };
    // Swap bytes if needed.  For 64-bit types we need the weights to fit in a
    // signed int64, but for smaller types we don't want sign extension,
    // so we use unsigned which still fits in a signed int64.
    switch (stmt.op) {
    case Un::Op::BE16:
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        } else {
            // Byte order already matches: BE16 only truncates to 16 bits.
            swap_endianness(dst.svalue, truncate<uint16_t>);
            swap_endianness(dst.uvalue, truncate<uint16_t>);
        }
        break;
    case Un::Op::BE32:
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint32_t>);
            swap_endianness(dst.uvalue, truncate<uint32_t>);
        }
        break;
    case Un::Op::BE64:
        // 64-bit swap involves no truncation, so on a big-endian target it is
        // a no-op and there is no else branch.
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        }
        break;
    case Un::Op::LE16:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint16_t>);
            swap_endianness(dst.uvalue, truncate<uint16_t>);
        }
        break;
    case Un::Op::LE32:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint32_t>);
            swap_endianness(dst.uvalue, truncate<uint32_t>);
        }
        break;
    case Un::Op::LE64:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        }
        break;
    case Un::Op::SWAP16:
        // Unconditional swaps are independent of the target byte order.
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        break;
    case Un::Op::SWAP32:
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        break;
    case Un::Op::SWAP64:
        swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        break;
    case Un::Op::NEG:
        dom.rcp.values->neg(dst.svalue, dst.uvalue, stmt.is64 ? 64 : 32);
        // Negating a pointer yields a non-pointer; drop offset information.
        dom.rcp.havoc_offsets(stmt.dst);
        break;
    }
}
366

367
/// Transformer for Exit: unwinds the current subprogram frame, if any.
void EbpfTransformer::operator()(const Exit& a) {
    if (dom.is_bottom()) {
        return;
    }
    // Clean up any state for the current stack frame.
    const std::string prefix = a.stack_frame_prefix;
    if (prefix.empty()) {
        // No enclosing frame: nothing to unwind.
        return;
    }
    havoc_subprogram_stack(prefix);
    restore_callee_saved_registers(prefix);

    // Restore r10.
    constexpr Reg r10_reg{R10_STACK_POINTER};
    add(r10_reg, EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
383

384
void EbpfTransformer::operator()(const Jmp&) const {
    // This is a NOP. It only exists to hold the jump preconditions.
}
387

388
/// Transformer for legacy packet-access instructions: clobbers r1-r5 and
/// leaves a number of unknown value in r0.
void EbpfTransformer::operator()(const Packet& a) {
    if (dom.is_bottom()) {
        return;
    }
    // r1-r5 and r0 are disjoint, so the order of these updates is immaterial.
    scratch_caller_saved_registers();
    constexpr Reg ret_reg{R0_RETURN_VALUE};
    dom.rcp.havoc_register_except_type(ret_reg);
    dom.rcp.assign_type(ret_reg, T_NUM);
}
397

398
/// Model a load of `width` bytes from the stack at `symb_addr` into
/// `target_reg`; `src_reg` is the register holding the stack pointer used
/// for the access (its stack_numeric_size may prove the result numeric).
void EbpfTransformer::do_load_stack(TypeToNumDomain& rcp, ArrayDomain& stack, const Reg& target_reg,
                                    const LinearExpression& symb_addr, const int width, const Reg& src_reg) {
    const Interval addr = rcp.values.eval_interval(symb_addr);
    using namespace dsl_syntax;
    // If the whole read fits within the numeric prefix tracked for the source
    // pointer, the loaded value is a number regardless of per-cell types.
    if (rcp.values.entail(width <= reg_pack(src_reg).stack_numeric_size)) {
        rcp.assign_type(target_reg, T_NUM);
    } else {
        rcp.assign_type(target_reg, stack.load_type(addr, width));
        if (!rcp.types.is_initialized(target_reg)) {
            // We don't know what we loaded, so just havoc the destination register.
            rcp.havoc_register(target_reg);
            return;
        }
    }

    const RegPack& target = reg_pack(target_reg);
    if (width == 1 || width == 2 || width == 4 || width == 8) {
        // Use the addr before we havoc the destination register since we might be getting the
        // addr from that same register.
        const std::optional<LinearExpression> sresult = stack.load(rcp.values, DataKind::svalues, addr, width);
        const std::optional<LinearExpression> uresult = stack.load(rcp.values, DataKind::uvalues, addr, width);
        rcp.havoc_register_except_type(target_reg);
        rcp.values.assign(target.svalue, sresult);
        rcp.values.assign(target.uvalue, uresult);
        // Propagate the kind-specific variables (offsets, sizes, ...) for
        // every type the loaded value may have.
        for (const TypeEncoding type : rcp.types.iterate_types(target_reg)) {
            for (const auto& kind : type_to_kinds.at(type)) {
                const Variable dst_var = variable_registry->reg(kind, target_reg.v);
                rcp.values.assign(dst_var, stack.load(rcp.values, kind, addr, width));
            }
        }
    } else {
        // Unusual width: keep the type computed above but forget the value.
        rcp.havoc_register_except_type(target_reg);
    }
}
432

433
/// Model a load of `width` bytes from the program's context object at the
/// (possibly imprecise) address `addr_vague` into `target_reg`. Loads of the
/// special data/data_end/meta fields produce packet pointers; anything else
/// is a number.
static void do_load_ctx(TypeToNumDomain& rcp, const Reg& target_reg, const LinearExpression& addr_vague,
                        const int width) {
    using namespace dsl_syntax;
    if (rcp.values.is_bottom()) {
        return;
    }

    const ebpf_context_descriptor_t* desc = thread_local_program_info->type.context_descriptor;

    const RegPack& target = reg_pack(target_reg);

    if (desc->end < 0) {
        // This context type exposes no packet-pointer fields: every load
        // yields a number of unknown value.
        rcp.havoc_register(target_reg);
        rcp.assign_type(target_reg, T_NUM);
        return;
    }

    const Interval interval = rcp.values.eval_interval(addr_vague);
    const std::optional<Number> maybe_addr = interval.singleton();
    rcp.havoc_register(target_reg);

    // Whether the (imprecise) address may cover a packet-pointer field.
    const bool may_touch_ptr =
        interval.contains(desc->data) || interval.contains(desc->meta) || interval.contains(desc->end);

    if (!maybe_addr) {
        if (may_touch_ptr) {
            rcp.types.havoc_type(target_reg);
        } else {
            rcp.assign_type(target_reg, T_NUM);
        }
        return;
    }

    const Number& addr = *maybe_addr;

    // We use offsets for packet data, data_end, and meta during verification,
    // but at runtime they will be 64-bit pointers.  We can use the offset values
    // for verification like we use map_fd's as a proxy for maps which
    // at runtime are actually 64-bit memory pointers.
    const int offset_width = desc->end - desc->data;
    if (addr == desc->data) {
        if (width == offset_width) {
            rcp.values.assign(target.packet_offset, 0);
        }
    } else if (addr == desc->end) {
        if (width == offset_width) {
            rcp.values.assign(target.packet_offset, variable_registry->packet_size());
            // EXPERIMENTAL: Explicit upper bound since packet_size is min_only.
            // This preserves the relational constraint (packet_offset <= packet_size)
            // while ensuring comparison checks have a concrete upper bound.
            rcp.values.add_constraint(target.packet_offset < MAX_PACKET_SIZE);
        }
    } else if (addr == desc->meta) {
        if (width == offset_width) {
            rcp.values.assign(target.packet_offset, variable_registry->meta_offset());
        }
    } else {
        // An ordinary field of the context at a known address.
        if (may_touch_ptr) {
            rcp.types.havoc_type(target_reg);
        } else {
            rcp.assign_type(target_reg, T_NUM);
        }
        return;
    }
    // Only a full-width read of a pointer field yields a packet pointer.
    if (width == offset_width) {
        rcp.assign_type(target_reg, T_PACKET);
        rcp.values.add_constraint(4098 <= target.svalue);
        rcp.values.add_constraint(target.svalue <= PTR_MAX);
    }
}
503

504
/// Model a load from packet or shared memory: the contents are unknown but
/// always numeric; narrow loads get width-derived value bounds.
static void do_load_packet_or_shared(TypeToNumDomain& rcp, const Reg& target_reg, const int width,
                                     const bool is_signed) {
    if (rcp.values.is_bottom()) {
        return;
    }
    const RegPack& target = reg_pack(target_reg);

    rcp.havoc_register(target_reg);
    rcp.assign_type(target_reg, T_NUM);

    // Small copies can be range-limited and useful for later arithmetic.
    if (is_signed && (width == 1 || width == 2 || width == 4)) {
        // Sign-extending (MEMSX) load.
        // NOTE(review): bounding uvalue by unsigned_int(width*8) looks unsound
        // for negative sign-extended values, whose 64-bit unsigned view is
        // near 2^64 — confirm the intended semantics of these intervals.
        rcp.values.set(target.svalue, Interval::signed_int(width * 8));
        rcp.values.set(target.uvalue, Interval::unsigned_int(width * 8));
    } else if (width == 1 || width == 2) {
        // Zero-extended narrow load: signed and unsigned views coincide.
        const Interval full = Interval::unsigned_int(width * 8);
        rcp.values.set(target.svalue, full);
        rcp.values.set(target.uvalue, full);
    }
}
524

525
void EbpfTransformer::do_load(const Mem& b, const Reg& target_reg) {
27,030✔
526
    using namespace dsl_syntax;
13,515✔
527

528
    const auto mem_reg = reg_pack(b.access.basereg);
27,030✔
529
    const int width = b.access.width;
27,030✔
530
    const int offset = b.access.offset;
27,030✔
531

532
    if (b.access.basereg.v == R10_STACK_POINTER) {
27,030✔
533
        const LinearExpression addr = mem_reg.stack_offset + offset;
17,484✔
534
        do_load_stack(dom.rcp, stack, target_reg, addr, width, b.access.basereg);
11,656✔
535
        return;
11,656✔
536
    }
11,656✔
537
    dom.rcp = dom.rcp.join_over_types(b.access.basereg, [&](TypeToNumDomain& rcp, TypeEncoding type) {
30,748✔
538
        switch (type) {
15,546✔
539
        case T_UNINIT: return;
540
        case T_MAP: return;
541
        case T_MAP_PROGRAMS: return;
542
        case T_NUM: return;
543
        case T_CTX: {
3,768✔
544
            LinearExpression addr = mem_reg.ctx_offset + offset;
5,652✔
545
            do_load_ctx(rcp, target_reg, addr, width);
3,768✔
546
            break;
3,768✔
547
        }
3,768✔
548
        case T_STACK: {
522✔
549
            LinearExpression addr = mem_reg.stack_offset + offset;
783✔
550
            do_load_stack(rcp, stack, target_reg, addr, width, b.access.basereg);
522✔
551
            break;
522✔
552
        }
522✔
553
        case T_PACKET: {
3,332✔
554
            LinearExpression addr = mem_reg.packet_offset + offset;
4,998✔
555
            do_load_packet_or_shared(rcp, target_reg, width, b.is_signed);
3,332✔
556
            break;
3,332✔
557
        }
3,332✔
558
        case T_SHARED: {
7,924✔
559
            LinearExpression addr = mem_reg.shared_offset + offset;
11,886✔
560
            do_load_packet_or_shared(rcp, target_reg, width, b.is_signed);
7,924✔
561
            break;
7,924✔
562
        }
7,924✔
563
        }
564
    });
15,374✔
565
}
566

567
/// Model a store of `exact_width` bytes to the stack at `symb_addr`.
/// `opt_val_reg` is the source register, or nullopt when storing an
/// immediate (whose signed/unsigned views are `val_svalue`/`val_uvalue`).
void EbpfTransformer::do_store_stack(TypeToNumDomain& rcp, ArrayDomain& stack, const LinearExpression& symb_addr,
                                     const int exact_width, const LinearExpression& val_svalue,
                                     const LinearExpression& val_uvalue, const std::optional<Reg>& opt_val_reg) {
    const Interval addr = rcp.values.eval_interval(symb_addr);
    const Interval width{exact_width};
    // no aliasing of val - we don't move from stack to stack, so we can just havoc first
    stack.havoc_type(rcp.types, addr, width);
    // Havoc every kind except the value kinds, which are handled below.
    for (const DataKind kind : iterate_kinds()) {
        if (kind == DataKind::svalues || kind == DataKind::uvalues) {
            continue;
        }
        stack.havoc(rcp.values, kind, addr, width);
    }
    bool must_be_num = false;
    if (opt_val_reg && !rcp.types.is_initialized(*opt_val_reg)) {
        // Storing a register of unknown type: the stored cells are unknown too.
        stack.havoc(rcp.values, DataKind::svalues, addr, width);
        stack.havoc(rcp.values, DataKind::uvalues, addr, width);
    } else {
        // opt_val_reg is unset when storing an immediate value.
        must_be_num = !opt_val_reg || rcp.types.type_is_number(*opt_val_reg);
        const LinearExpression val_type =
            must_be_num ? LinearExpression{T_NUM} : variable_registry->type_reg(opt_val_reg->v);
        rcp.assign_type(stack.store_type(rcp.types, addr, width, must_be_num), val_type);

        if (exact_width == 8) {
            // Full-width store: copy the value and, for non-numbers, the
            // kind-specific variables (offsets etc.) into the stack cells.
            stack.havoc(rcp.values, DataKind::svalues, addr, width);
            stack.havoc(rcp.values, DataKind::uvalues, addr, width);
            rcp.values.assign(stack.store(rcp.values, DataKind::svalues, addr, width), val_svalue);
            rcp.values.assign(stack.store(rcp.values, DataKind::uvalues, addr, width), val_uvalue);

            if (!must_be_num) {
                for (TypeEncoding type : rcp.types.iterate_types(*opt_val_reg)) {
                    for (const DataKind kind : type_to_kinds.at(type)) {
                        const Variable src_var = variable_registry->reg(kind, opt_val_reg->v);
                        rcp.values.assign(stack.store(rcp.values, kind, addr, width), src_var);
                    }
                }
            }
        } else if ((exact_width == 1 || exact_width == 2 || exact_width == 4) && must_be_num) {
            // Keep track of numbers on the stack that might be used as array indices.
            if (const auto stack_svalue = stack.store(rcp.values, DataKind::svalues, addr, width)) {
                rcp.values.assign(stack_svalue, val_svalue);
                // Wrap the stored value into the range representable at this width.
                rcp.values->overflow_bounds(*stack_svalue, exact_width * 8, true);
            } else {
                stack.havoc(rcp.values, DataKind::svalues, addr, width);
            }
            if (const auto stack_uvalue = stack.store(rcp.values, DataKind::uvalues, addr, width)) {
                rcp.values.assign(stack_uvalue, val_uvalue);
                rcp.values->overflow_bounds(*stack_uvalue, exact_width * 8, false);
            } else {
                stack.havoc(rcp.values, DataKind::uvalues, addr, width);
            }
        } else {
            // Narrow store of a non-number (or odd width): values unknown.
            stack.havoc(rcp.values, DataKind::svalues, addr, width);
            stack.havoc(rcp.values, DataKind::uvalues, addr, width);
        }
    }

    // Update stack_numeric_size for any stack type variables.
    // stack_numeric_size holds the number of continuous bytes starting from stack_offset that are known to be numeric.
    for (const Variable type_variable : variable_registry->get_type_variables()) {
        if (!rcp.types.is_initialized(type_variable) || !rcp.types.may_have_type(type_variable, T_STACK)) {
            continue;
        }
        const Variable stack_offset_variable = variable_registry->kind_var(DataKind::stack_offsets, type_variable);
        const Variable stack_numeric_size_variable =
            variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable);

        using namespace dsl_syntax;
        // See if the variable's numeric interval overlaps with changed bytes.
        if (rcp.values.intersect(
                dsl_syntax::operator<=(symb_addr, stack_offset_variable + stack_numeric_size_variable)) &&
            rcp.values.intersect(dsl_syntax::operator>=(symb_addr + exact_width, stack_offset_variable))) {
            if (!must_be_num) {
                // A non-numeric store may shrink the numeric prefix.
                rcp.values.havoc(stack_numeric_size_variable);
            }
            recompute_stack_numeric_size(rcp, stack, type_variable);
        }
    }
}
647

648
// Transformer for load/store instructions. Loads are delegated to do_load();
// a sign-extending load (is_signed) is modeled as a plain load followed by a
// MOVSX on the destination register. Stores go through do_mem_store() with
// either the value register's tracked values or an immediate.
void EbpfTransformer::operator()(const Mem& b) {
    if (dom.is_bottom()) {
        return;
    }
    if (const auto preg = std::get_if<Reg>(&b.value)) {
        if (b.is_load) {
            do_load(b, *preg);
            if (b.is_signed) {
                Bin::Op op{};
                // MEMSX decode only allows widths 1/2/4. Programmatic Mem construction paths
                // (for example from Atomic lowering) do not set is_signed=true.
                switch (b.access.width) {
                case 1: op = Bin::Op::MOVSX8; break;
                case 2: op = Bin::Op::MOVSX16; break;
                case 4: op = Bin::Op::MOVSX32; break;
                default: CRAB_ERROR("unexpected MEMSX width", b.access.width);
                }
                // Sign-extend in place: dst := sign_extend(dst).
                (*this)(Bin{.op = op, .dst = *preg, .v = *preg, .is64 = true, .lddw = false});
            }
        } else {
            // Store of a register: pass its tracked signed/unsigned values.
            const auto data_reg = reg_pack(*preg);
            do_mem_store(b, data_reg.svalue, data_reg.uvalue, *preg);
        }
    } else {
        // Store of an immediate: no value register is involved.
        const uint64_t imm = std::get<Imm>(b.value).v;
        do_mem_store(b, to_signed(imm), imm, {});
    }
}
676

677
// Model a memory store. When the base register is r10 with an exactly-known
// stack offset, the store updates the tracked stack contents at a concrete
// address. Otherwise, the store is joined over the possible types of the base
// register: only the T_STACK case updates the stack (via its symbolic offset);
// stores through any other pointer type are ignored, since the contents of
// those regions are not tracked.
void EbpfTransformer::do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                                   const std::optional<Reg>& opt_val_reg) {
    if (dom.is_bottom()) {
        return;
    }
    const int width = b.access.width;
    const Number offset{b.access.offset};
    if (b.access.basereg.v == R10_STACK_POINTER) {
        const auto r10_stack_offset = reg_pack(b.access.basereg).stack_offset;
        const auto r10_interval = dom.rcp.values.eval_interval(r10_stack_offset);
        if (r10_interval.is_singleton()) {
            // r10's offset is a single concrete value, so the store address is exact.
            const int32_t stack_offset = r10_interval.singleton()->cast_to<int32_t>();
            const Number base_addr{stack_offset};
            do_store_stack(dom.rcp, stack, base_addr + offset, width, val_svalue, val_uvalue, opt_val_reg);
            return;
        }
    }
    dom.rcp = dom.rcp.join_over_types(b.access.basereg, [&](TypeToNumDomain& rcp, const TypeEncoding type) {
        if (type == T_STACK) {
            // Symbolic stack address: base register's stack offset plus the displacement.
            const auto base_addr = LinearExpression(reg_pack(b.access.basereg).stack_offset);
            do_store_stack(rcp, stack, dsl_syntax::operator+(base_addr, offset), width, val_svalue, val_uvalue,
                           opt_val_reg);
        }
        // do nothing for any other type
    });
}
29,600✔
703

704
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
705
static Bin atomic_to_bin(const Atomic& a) {
128✔
706
    Bin bin{.dst = Reg{R11_ATOMIC_SCRATCH}, .v = a.valreg, .is64 = a.access.width == sizeof(uint64_t), .lddw = false};
128✔
707
    switch (a.op) {
128✔
708
    case Atomic::Op::ADD: bin.op = Bin::Op::ADD; break;
24✔
709
    case Atomic::Op::OR: bin.op = Bin::Op::OR; break;
24✔
710
    case Atomic::Op::AND: bin.op = Bin::Op::AND; break;
24✔
711
    case Atomic::Op::XOR: bin.op = Bin::Op::XOR; break;
24✔
712
    case Atomic::Op::XCHG:
16✔
713
    case Atomic::Op::CMPXCHG: bin.op = Bin::Op::MOV; break;
16✔
714
    default: throw std::exception();
×
715
    }
716
    return bin;
128✔
717
}
718

719
// Transformer for atomic read-modify-write instructions. For non-stack
// (shared) targets only the updated registers are havocked, since shared
// contents are not tracked. For stack targets the atomic is lowered into an
// explicit load / compute / store sequence through the R11 scratch register.
void EbpfTransformer::operator()(const Atomic& a) {
    if (dom.is_bottom()) {
        return;
    }
    if (!dom.rcp.types.type_is_pointer(a.access.basereg) || !dom.rcp.types.type_is_number(a.valreg)) {
        // Nothing to model for ill-typed operands.
        return;
    }
    if (dom.rcp.types.type_is_not_stack(a.access.basereg)) {
        // Shared memory regions are volatile so we can just havoc
        // any register that will be updated.
        if (a.op == Atomic::Op::CMPXCHG) {
            dom.rcp.havoc_register_except_type(Reg{R0_RETURN_VALUE});
        } else if (a.fetch) {
            dom.rcp.havoc_register_except_type(a.valreg);
        }
        return;
    }

    // Fetch the current value into the R11 pseudo-register.
    constexpr Reg r11{R11_ATOMIC_SCRATCH};
    (*this)(Mem{.access = a.access, .value = r11, .is_load = true});

    // Compute the new value in R11.
    (*this)(atomic_to_bin(a));

    if (a.op == Atomic::Op::CMPXCHG) {
        // For CMPXCHG, store the original value in r0.
        (*this)(Mem{.access = a.access, .value = Reg{R0_RETURN_VALUE}, .is_load = true});

        // For the destination, there are 3 possibilities:
        // 1) dst.value == r0.value : set R11 to valreg
        // 2) dst.value != r0.value : don't modify R11
        // 3) dst.value may or may not == r0.value : set R11 to the union of R11 and valreg
        // For now we just havoc the value of R11.
        dom.rcp.havoc_register_except_type(r11);
    } else if (a.fetch) {
        // For other FETCH operations, store the original value in the src register.
        (*this)(Mem{.access = a.access, .value = a.valreg, .is_load = true});
    }

    // Store the new value back in the original shared memory location.
    // Note that do_mem_store() currently doesn't track shared memory values,
    // but stack memory values are tracked and are legal here.
    (*this)(Mem{.access = a.access, .value = r11, .is_load = false});

    // Clear the R11 pseudo-register.
    dom.rcp.havoc_register(r11);
}
767

768
// Transformer for a helper-function call: havoc any writable memory regions
// the helper may modify, model the r0 return value (a possibly-null pointer
// for map lookups, a number otherwise), and scratch caller-saved registers.
void EbpfTransformer::operator()(const Call& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    std::optional<Reg> maybe_fd_reg{};
    for (ArgSingle param : call.singles) {
        switch (param.kind) {
        // Remember the map-fd argument; it drives the r0 model below.
        case ArgSingle::Kind::MAP_FD: maybe_fd_reg = param.reg; break;
        case ArgSingle::Kind::ANYTHING:
        case ArgSingle::Kind::MAP_FD_PROGRAMS:
        case ArgSingle::Kind::PTR_TO_MAP_KEY:
        case ArgSingle::Kind::PTR_TO_MAP_VALUE:
        case ArgSingle::Kind::PTR_TO_CTX:
            // Do nothing. We don't track the content of relevant memory regions
            break;
        case ArgSingle::Kind::PTR_TO_STACK:
            // Do nothing; the stack is passed as context, not to be modified.
            break;
        }
    }
    for (ArgPair param : call.pairs) {
        switch (param.kind) {
        case ArgPair::Kind::PTR_TO_READABLE_MEM:
            // Do nothing. No side effect allowed.
            break;

        case ArgPair::Kind::PTR_TO_WRITABLE_MEM: {
            bool store_numbers = true;
            auto variable = dom.rcp.get_type_offset_variable(param.mem);
            if (!variable.has_value()) {
                // checked by the checker
                break;
            }
            // The (symbolic) address and size of the writable region.
            Interval addr = dom.rcp.values.eval_interval(variable.value());
            Interval width = dom.rcp.values.eval_interval(reg_pack(param.size).svalue);

            dom.rcp = dom.rcp.join_over_types(param.mem, [&](TypeToNumDomain& rcp, const TypeEncoding type) {
                if (type == T_STACK) {
                    // Pointer to a memory region that the called function may change,
                    // so we must havoc.
                    for (const DataKind kind : iterate_kinds()) {
                        stack.havoc(rcp.values, kind, addr, width);
                    }
                } else {
                    store_numbers = false;
                }
            });
            if (store_numbers) {
                // Functions are not allowed to write sensitive data,
                // and initialization is guaranteed
                stack.store_numbers(addr, width);
            }
        }
        }
    }

    constexpr Reg r0_reg{R0_RETURN_VALUE};
    const auto r0_pack = reg_pack(r0_reg);
    // The helper's return value is not a stack pointer with a numeric prefix.
    dom.rcp.values.havoc(r0_pack.stack_numeric_size);
    if (call.is_map_lookup) {
        // This is the only way to get a null pointer
        if (maybe_fd_reg) {
            if (const auto map_type = dom.get_map_type(*maybe_fd_reg)) {
                if (thread_local_program_info->platform->get_map_type(*map_type).value_type == EbpfMapValueType::MAP) {
                    // Map-in-map: looking up in an outer map yields an inner map fd.
                    if (const auto inner_map_fd = dom.get_map_inner_map_fd(*maybe_fd_reg)) {
                        do_load_mapfd(r0_reg, to_signed(*inner_map_fd), true);
                        goto out;
                    }
                } else {
                    // Regular map: r0 is a possibly-null pointer into the value region,
                    // bounded by the map's value size.
                    assign_valid_ptr(r0_reg, true);
                    dom.rcp.values.assign(r0_pack.shared_offset, 0);
                    dom.rcp.values.set(r0_pack.shared_region_size, dom.get_map_value_size(*maybe_fd_reg));
                    dom.rcp.assign_type(r0_reg, T_SHARED);
                }
            }
        }
        // NOTE(review): this also re-runs for the regular-map branch above (its
        // goto only covers map-in-map); the re-application looks redundant but
        // harmless since shared_region_size set above is preserved — confirm.
        assign_valid_ptr(r0_reg, true);
        dom.rcp.values.assign(r0_pack.shared_offset, 0);
        dom.rcp.assign_type(r0_reg, T_SHARED);
    } else {
        // Non-lookup helpers return a plain number in r0.
        dom.rcp.havoc_register_except_type(r0_reg);
        dom.rcp.assign_type(r0_reg, T_NUM);
        // dom.rcp.values.add_constraint(r0_pack.value < 0); for INTEGER_OR_NO_RETURN_IF_SUCCEED.
    }
out:
    scratch_caller_saved_registers();
    if (call.reallocate_packet) {
        // Helpers that may reallocate the packet invalidate packet pointers.
        forget_packet_pointers();
    }
}
859

860
// Transformer for a call to a local (bpf-to-bpf) subprogram: save the
// callee-saved registers for the new frame and move r10 down to it.
void EbpfTransformer::operator()(const CallLocal& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    save_callee_saved_registers(call.stack_frame_prefix);
    // Carve out the subprogram's stack frame by lowering r10.
    add(Reg{R10_STACK_POINTER}, -EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
871

872
// Transformer for an indirect helper call (callx): the helper id comes from a
// register rather than an immediate. Only a statically-known id that names a
// usable helper is modeled; otherwise the instruction has no effect here.
void EbpfTransformer::operator()(const Callx& callx) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }

    // The call target must be a single concrete value that fits in an int32.
    const auto target = dom.rcp.values.eval_interval(reg_pack(callx.func).svalue).singleton();
    if (!target || !target->fits<int32_t>()) {
        return;
    }
    const int32_t func_id = target->cast_to<int32_t>();
    if (!thread_local_program_info->platform->is_helper_usable(func_id)) {
        return;
    }
    // Process it exactly as if the id had been given as an immediate.
    (*this)(make_call(func_id, *thread_local_program_info->platform));
}
22✔
893

894
void EbpfTransformer::do_load_mapfd(const Reg& dst_reg, const int mapfd, const bool maybe_null) {
5,038✔
895
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
5,038✔
896
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
5,038✔
897
    const RegPack& dst = reg_pack(dst_reg);
5,038✔
898
    if (type.value_type == EbpfMapValueType::PROGRAM) {
5,038✔
899
        dom.rcp.assign_type(dst_reg, T_MAP_PROGRAMS);
546✔
900
        dom.rcp.values.assign(dst.map_fd_programs, mapfd);
819✔
901
    } else {
902
        dom.rcp.assign_type(dst_reg, T_MAP);
4,492✔
903
        dom.rcp.values.assign(dst.map_fd, mapfd);
6,738✔
904
    }
905
    assign_valid_ptr(dst_reg, maybe_null);
5,038✔
906
}
5,038✔
907

908
// Transformer for LDDW with a map fd: the loaded fd is a valid, non-null map pointer.
void EbpfTransformer::operator()(const LoadMapFd& ins) {
    if (!dom.is_bottom()) {
        do_load_mapfd(ins.dst, ins.mapfd, false);
    }
}
914

915
void EbpfTransformer::do_load_map_address(const Reg& dst_reg, const int mapfd, const int32_t offset) {
288✔
916
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
288✔
917
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
288✔
918

919
    if (type.value_type == EbpfMapValueType::PROGRAM) {
288✔
920
        throw std::invalid_argument("Cannot load address of program map type - only data maps are supported");
×
921
    }
922

923
    // Set the shared region size and offset for the map.
924
    dom.rcp.assign_type(dst_reg, T_SHARED);
288✔
925
    const RegPack& dst = reg_pack(dst_reg);
288✔
926
    dom.rcp.values.assign(dst.shared_offset, offset);
288✔
927
    dom.rcp.values.assign(dst.shared_region_size, desc.value_size);
288✔
928
    assign_valid_ptr(dst_reg, false);
288✔
929
}
288✔
930

931
// Transformer for the "load map value address" pseudo-instruction.
void EbpfTransformer::operator()(const LoadMapAddress& ins) {
    if (!dom.is_bottom()) {
        do_load_map_address(ins.dst, ins.mapfd, ins.offset);
    }
}
937

938
void EbpfTransformer::assign_valid_ptr(const Reg& dst_reg, const bool maybe_null) {
10,528✔
939
    using namespace dsl_syntax;
5,264✔
940
    const RegPack& reg = reg_pack(dst_reg);
10,528✔
941
    dom.rcp.values.havoc(reg.svalue);
10,528✔
942
    dom.rcp.values.havoc(reg.uvalue);
10,528✔
943
    if (maybe_null) {
10,528✔
944
        dom.rcp.values.add_constraint(0 <= reg.svalue);
10,468✔
945
    } else {
946
        dom.rcp.values.add_constraint(0 < reg.svalue);
10,588✔
947
    }
948
    dom.rcp.values.add_constraint(reg.svalue <= PTR_MAX);
15,792✔
949
    dom.rcp.values.assign(reg.uvalue, reg.svalue);
10,528✔
950
}
10,528✔
951

952
// If nothing is known of the stack_numeric_size,
953
// try to recompute the stack_numeric_size.
954
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& rcp, ArrayDomain& stack,
15,348✔
955
                                                   const Variable type_variable) {
956
    const Variable stack_numeric_size_variable =
7,674✔
957
        variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable);
15,348✔
958

959
    if (rcp.types.may_have_type(type_variable, T_STACK)) {
15,348✔
960
        const int numeric_size =
7,189✔
961
            stack.min_all_num_size(rcp.values, variable_registry->kind_var(DataKind::stack_offsets, type_variable));
14,378✔
962
        if (numeric_size > 0) {
14,378✔
963
            rcp.values.assign(stack_numeric_size_variable, numeric_size);
12,942✔
964
        }
965
    }
966
}
15,348✔
967

968
// Convenience overload: recompute the numeric size for a register, via its type variable.
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& rcp, ArrayDomain& stack, const Reg& reg) {
    recompute_stack_numeric_size(rcp, stack, reg_type(reg));
}
8,886✔
971

972
// dst += imm, with overflow at finite_width bits. If dst is a pointer, its
// type-specific offset variable moves by imm as well, and for stack pointers
// the tracked numeric-prefix size (stack_numeric_size) is kept sound.
void EbpfTransformer::add(const Reg& dst_reg, const int imm, const int finite_width) {
    const auto dst = reg_pack(dst_reg);
    dom.rcp.values->add_overflow(dst.svalue, dst.uvalue, imm, finite_width);
    if (const auto offset = dom.rcp.get_type_offset_variable(dst_reg)) {
        dom.rcp.values->add(*offset, imm);
        if (imm > 0) {
            // Since the start offset is increasing but
            // the end offset is not, the numeric size decreases.
            dom.rcp.values->sub(dst.stack_numeric_size, imm);
        } else if (imm < 0) {
            // Moving the start offset down: the old numeric size no longer
            // describes the bytes at the new offset, so forget it.
            dom.rcp.values.havoc(dst.stack_numeric_size);
        }
        // Try to rebuild stack_numeric_size from the stack contents.
        recompute_stack_numeric_size(dom.rcp, stack, dst_reg);
    }
}
11,706✔
987

988
void EbpfTransformer::shl(const Reg& dst_reg, int imm, const int finite_width) {
5,610✔
989
    dom.rcp.havoc_offsets(dst_reg);
5,610✔
990

991
    // The BPF ISA requires masking the imm.
992
    imm &= finite_width - 1;
5,610✔
993
    const RegPack dst = reg_pack(dst_reg);
5,610✔
994
    if (dom.rcp.types.type_is_number(dst_reg)) {
5,610✔
995
        dom.rcp.values->shl(dst.svalue, dst.uvalue, imm, finite_width);
5,610✔
996
    } else {
997
        dom.rcp.values.havoc(dst.svalue);
×
998
        dom.rcp.values.havoc(dst.uvalue);
×
999
    }
1000
}
5,610✔
1001

1002
void EbpfTransformer::lshr(const Reg& dst_reg, int imm, int finite_width) {
2,620✔
1003
    dom.rcp.havoc_offsets(dst_reg);
2,620✔
1004

1005
    // The BPF ISA requires masking the imm.
1006
    imm &= finite_width - 1;
2,620✔
1007
    const RegPack dst = reg_pack(dst_reg);
2,620✔
1008
    if (dom.rcp.types.type_is_number(dst_reg)) {
2,620✔
1009
        dom.rcp.values->lshr(dst.svalue, dst.uvalue, imm, finite_width);
2,620✔
1010
    } else {
1011
        dom.rcp.values.havoc(dst.svalue);
×
1012
        dom.rcp.values.havoc(dst.uvalue);
×
1013
    }
1014
}
2,620✔
1015

1016
void EbpfTransformer::ashr(const Reg& dst_reg, const LinearExpression& right_svalue, const int finite_width) {
770✔
1017
    dom.rcp.havoc_offsets(dst_reg);
770✔
1018

1019
    const RegPack dst = reg_pack(dst_reg);
770✔
1020
    if (dom.rcp.types.type_is_number(dst_reg)) {
770✔
1021
        dom.rcp.values->ashr(dst.svalue, dst.uvalue, right_svalue, finite_width);
770✔
1022
    } else {
1023
        dom.rcp.values.havoc(dst.svalue);
×
1024
        dom.rcp.values.havoc(dst.uvalue);
×
1025
    }
1026
}
770✔
1027

1028
// Number of low-order source bits kept (then sign-extended) by a MOVSX variant.
static int _movsx_bits(const Bin::Op op) {
    if (op == Bin::Op::MOVSX8) {
        return 8;
    }
    if (op == Bin::Op::MOVSX16) {
        return 16;
    }
    if (op == Bin::Op::MOVSX32) {
        return 32;
    }
    throw std::exception();
}
1036

1037
void EbpfTransformer::operator()(const Bin& bin) {
100,856✔
1038
    if (dom.is_bottom()) {
100,856✔
1039
        return;
36✔
1040
    }
1041
    using namespace dsl_syntax;
50,428✔
1042

1043
    auto dst = reg_pack(bin.dst);
100,856✔
1044
    int finite_width = bin.is64 ? 64 : 32;
100,856✔
1045

1046
    // TODO: Unusable states and values should be better handled.
1047
    //       Probably by propagating an error state.
1048
    if (!dom.rcp.types.is_initialized(bin.dst) &&
132,154✔
1049
        !std::set{Bin::Op::MOV, Bin::Op::MOVSX8, Bin::Op::MOVSX16, Bin::Op::MOVSX32}.contains(bin.op)) {
179,101✔
1050
        dom.rcp.havoc_register(bin.dst);
×
1051
        return;
×
1052
    }
1053
    if (auto pimm = std::get_if<Imm>(&bin.v)) {
100,856✔
1054
        // dst += K
1055
        int64_t imm;
30,381✔
1056
        if (bin.is64) {
60,762✔
1057
            // Use the full signed value.
1058
            imm = to_signed(pimm->v);
50,470✔
1059
        } else {
1060
            // Use only the low 32 bits of the value.
1061
            imm = gsl::narrow_cast<int32_t>(pimm->v);
10,292✔
1062
            // If this is a 32-bit operation and the destination is not proven a number, forget the register.
1063
            if (dom.rcp.types.type_is_number(bin.dst)) {
10,292✔
1064
                // Safe to zero-extend the low 32 bits; even if it's also bin.src, only the 32-bit value is used.
1065
                dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
11,313✔
1066
            } else {
1067
                dom.rcp.havoc_register(bin.dst);
2,750✔
1068
            }
1069
        }
1070
        switch (bin.op) {
60,762✔
1071
        case Bin::Op::MOV:
35,050✔
1072
            dom.rcp.values.assign(dst.svalue, imm);
35,050✔
1073
            dom.rcp.values.assign(dst.uvalue, imm);
35,050✔
1074
            dom.rcp.values->overflow_bounds(dst.uvalue, bin.is64 ? 64 : 32, false);
38,012✔
1075
            dom.rcp.assign_type(bin.dst, T_NUM);
35,050✔
1076
            dom.rcp.havoc_offsets(bin.dst);
35,050✔
1077
            break;
17,525✔
1078
        case Bin::Op::MOVSX8:
×
1079
        case Bin::Op::MOVSX16:
1080
        case Bin::Op::MOVSX32: CRAB_ERROR("Unsupported operation");
×
1081
        case Bin::Op::ADD:
11,566✔
1082
            if (imm == 0) {
11,566✔
1083
                return;
21✔
1084
            }
1085
            add(bin.dst, gsl::narrow<int>(imm), finite_width);
11,562✔
1086
            break;
5,781✔
1087
        case Bin::Op::SUB:
32✔
1088
            if (imm == 0) {
32✔
1089
                return;
1✔
1090
            }
1091
            add(bin.dst, gsl::narrow<int>(-imm), finite_width);
30✔
1092
            break;
15✔
1093
        case Bin::Op::MUL:
106✔
1094
            dom.rcp.values->mul(dst.svalue, dst.uvalue, imm, finite_width);
106✔
1095
            dom.rcp.havoc_offsets(bin.dst);
106✔
1096
            break;
53✔
1097
        case Bin::Op::UDIV:
172✔
1098
            dom.rcp.values->udiv(dst.svalue, dst.uvalue, imm, finite_width);
172✔
1099
            dom.rcp.havoc_offsets(bin.dst);
172✔
1100
            break;
86✔
1101
        case Bin::Op::UMOD:
26✔
1102
            dom.rcp.values->urem(dst.svalue, dst.uvalue, imm, finite_width);
26✔
1103
            dom.rcp.havoc_offsets(bin.dst);
26✔
1104
            break;
13✔
1105
        case Bin::Op::SDIV:
12✔
1106
            dom.rcp.values->sdiv(dst.svalue, dst.uvalue, imm, finite_width);
12✔
1107
            dom.rcp.havoc_offsets(bin.dst);
12✔
1108
            break;
6✔
1109
        case Bin::Op::SMOD:
28✔
1110
            dom.rcp.values->srem(dst.svalue, dst.uvalue, imm, finite_width);
28✔
1111
            dom.rcp.havoc_offsets(bin.dst);
28✔
1112
            break;
14✔
1113
        case Bin::Op::OR:
560✔
1114
            dom.rcp.values->bitwise_or(dst.svalue, dst.uvalue, imm);
560✔
1115
            dom.rcp.havoc_offsets(bin.dst);
560✔
1116
            break;
280✔
1117
        case Bin::Op::AND:
4,154✔
1118
            // FIX: what to do with ptr&-8 as in counter/simple_loop_unrolled?
1119
            dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, imm);
4,154✔
1120
            if (gsl::narrow<int32_t>(imm) > 0) {
4,154✔
1121
                // AND with immediate is only a 32-bit operation so svalue and uvalue are the same.
1122
                dom.rcp.values.add_constraint(dst.svalue <= imm);
5,376✔
1123
                dom.rcp.values.add_constraint(dst.uvalue <= imm);
5,376✔
1124
                dom.rcp.values.add_constraint(0 <= dst.svalue);
7,168✔
1125
                dom.rcp.values.add_constraint(0 <= dst.uvalue);
7,168✔
1126
            }
1127
            dom.rcp.havoc_offsets(bin.dst);
4,154✔
1128
            break;
2,077✔
1129
        case Bin::Op::LSH: shl(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
5,556✔
1130
        case Bin::Op::RSH: lshr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
2,582✔
1131
        case Bin::Op::ARSH: ashr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
734✔
1132
        case Bin::Op::XOR:
184✔
1133
            dom.rcp.values->bitwise_xor(dst.svalue, dst.uvalue, imm);
184✔
1134
            dom.rcp.havoc_offsets(bin.dst);
184✔
1135
            break;
92✔
1136
        }
1137
    } else {
1138
        // dst op= src
1139
        auto src_reg = std::get<Reg>(bin.v);
40,094✔
1140
        auto src = reg_pack(src_reg);
40,094✔
1141
        if (!dom.rcp.types.is_initialized(src_reg)) {
40,094✔
1142
            dom.rcp.havoc_register(bin.dst);
8✔
1143
            return;
34✔
1144
        }
1145
        switch (bin.op) {
40,086✔
1146
        case Bin::Op::ADD: {
2,082✔
1147
            if (dom.rcp.types.same_type(bin.dst, std::get<Reg>(bin.v))) {
2,082✔
1148
                // both must have been checked to be numbers
1149
                dom.rcp.values->add_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
1,482✔
1150
            } else {
1151
                // Here we're not sure that lhs and rhs are the same type; they might be.
1152
                // But previous assertions should fail unless we know that exactly one of lhs or rhs is a pointer.
1153
                dom.rcp = dom.rcp.join_over_types(bin.dst, [&](TypeToNumDomain& rcp, const TypeEncoding dst_type) {
1,200✔
1154
                    rcp = rcp.join_over_types(src_reg, [&](TypeToNumDomain& rcp, const TypeEncoding src_type) {
1,204✔
1155
                        if (dst_type == T_NUM && src_type != T_NUM) {
604✔
1156
                            // num += ptr
1157
                            rcp.assign_type(bin.dst, src_type);
8✔
1158
                            if (const auto dst_offset = get_type_offset_variable(bin.dst, src_type)) {
8✔
1159
                                rcp.values->apply(ArithBinOp::ADD, dst_offset.value(), dst.svalue,
12✔
1160
                                                  get_type_offset_variable(src_reg, src_type).value());
16✔
1161
                            }
1162
                            if (src_type == T_SHARED) {
8✔
1163
                                rcp.values.assign(dst.shared_region_size, src.shared_region_size);
×
1164
                            }
1165
                        } else if (dst_type != T_NUM && src_type == T_NUM) {
600✔
1166
                            // ptr += num
1167
                            rcp.assign_type(bin.dst, dst_type);
596✔
1168
                            if (const auto dst_offset = get_type_offset_variable(bin.dst, dst_type)) {
596✔
1169
                                rcp.values->apply(ArithBinOp::ADD, dst_offset.value(), dst_offset.value(), src.svalue);
596✔
1170
                                if (dst_type == T_STACK) {
596✔
1171
                                    // Reduce the numeric size.
1172
                                    using namespace dsl_syntax;
8✔
1173
                                    if (rcp.values.intersect(src.svalue < 0)) {
24✔
1174
                                        rcp.values.havoc(dst.stack_numeric_size);
2✔
1175
                                        recompute_stack_numeric_size(rcp, stack, bin.dst);
2✔
1176
                                    } else {
1177
                                        rcp.values->apply_signed(ArithBinOp::SUB, dst.stack_numeric_size,
28✔
1178
                                                                 dst.stack_numeric_size, dst.stack_numeric_size,
14✔
1179
                                                                 src.svalue, 0);
14✔
1180
                                    }
1181
                                }
1182
                            }
1183
                        } else if (dst_type == T_NUM && src_type == T_NUM) {
596✔
1184
                            // dst and src don't necessarily have the same type, but among the possibilities
1185
                            // enumerated is the case where they are both numbers.
1186
                            rcp.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
×
1187
                                                     finite_width);
1188
                        } else {
1189
                            // We ignore the cases here that do not match the assumption described
1190
                            // above.  Joining bottom with another result will leave the other
1191
                            // results unchanged.
1192
                            rcp.values.set_to_bottom();
×
1193
                        }
1194
                    });
1,206✔
1195
                });
1,202✔
1196
                // careful: change dst.value only after dealing with offset
1197
                dom.rcp.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
600✔
1198
                                             finite_width);
1199
            }
1200
            break;
1,041✔
1201
        }
1202
        case Bin::Op::SUB: {
722✔
1203
            if (dom.rcp.types.same_type(bin.dst, std::get<Reg>(bin.v))) {
722✔
1204
                // src and dest have the same type.
1205
                TypeDomain tmp_m_type_inv = dom.rcp.types;
718✔
1206
                dom.rcp = dom.rcp.join_over_types(bin.dst, [&](TypeToNumDomain& rcp, const TypeEncoding type) {
1,436✔
1207
                    switch (type) {
722✔
1208
                    case T_NUM:
528✔
1209
                        // This is: sub_overflow(inv, dst.value, src.value, finite_width);
1210
                        rcp.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
528✔
1211
                                                 finite_width);
1212
                        rcp.assign_type(bin.dst, T_NUM);
528✔
1213
                        rcp.havoc_offsets(bin.dst);
528✔
1214
                        break;
528✔
1215
                    default:
194✔
1216
                        // ptr -= ptr
1217
                        // Assertions should make sure we only perform this on non-shared pointers.
1218
                        if (const auto dst_offset = get_type_offset_variable(bin.dst, type)) {
194✔
1219
                            rcp.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst_offset.value(),
388✔
1220
                                                     get_type_offset_variable(src_reg, type).value(), finite_width);
291✔
1221
                            rcp.values.havoc(dst_offset.value());
194✔
1222
                        }
1223
                        rcp.havoc_offsets(bin.dst);
194✔
1224
                        rcp.assign_type(bin.dst, T_NUM);
194✔
1225
                        break;
194✔
1226
                    }
1227
                });
1,079✔
1228
            } else {
718✔
1229
                // We're not sure that lhs and rhs are the same type.
1230
                // Either they're different, or at least one is not a singleton.
1231
                if (dom.rcp.types.type_is_number(std::get<Reg>(bin.v))) {
4✔
1232
                    dom.rcp.values->sub_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
4✔
1233
                    if (auto dst_offset = dom.rcp.get_type_offset_variable(bin.dst)) {
4✔
1234
                        dom.rcp.values->sub(dst_offset.value(), src.svalue);
4✔
1235
                        if (dom.rcp.types.may_have_type(bin.dst, T_STACK)) {
4✔
1236
                            // Reduce the numeric size.
1237
                            using namespace dsl_syntax;
2✔
1238
                            if (dom.rcp.values.intersect(src.svalue > 0)) {
8✔
1239
                                dom.rcp.values.havoc(dst.stack_numeric_size);
4✔
1240
                                recompute_stack_numeric_size(dom.rcp, stack, bin.dst);
4✔
1241
                            } else {
1242
                                dom.rcp.values->apply(ArithBinOp::ADD, dst.stack_numeric_size, dst.stack_numeric_size,
×
1243
                                                      src.svalue);
1244
                            }
1245
                        }
1246
                    }
1247
                } else {
1248
                    dom.rcp.havoc_register(bin.dst);
×
1249
                }
1250
            }
1251
            break;
361✔
1252
        }
1253
        case Bin::Op::MUL:
76✔
1254
            dom.rcp.values->mul(dst.svalue, dst.uvalue, src.svalue, finite_width);
76✔
1255
            dom.rcp.havoc_offsets(bin.dst);
76✔
1256
            break;
38✔
1257
        case Bin::Op::UDIV:
96✔
1258
            dom.rcp.values->udiv(dst.svalue, dst.uvalue, src.uvalue, finite_width);
96✔
1259
            dom.rcp.havoc_offsets(bin.dst);
96✔
1260
            break;
48✔
1261
        case Bin::Op::UMOD:
38✔
1262
            dom.rcp.values->urem(dst.svalue, dst.uvalue, src.uvalue, finite_width);
38✔
1263
            dom.rcp.havoc_offsets(bin.dst);
38✔
1264
            break;
19✔
1265
        case Bin::Op::SDIV:
28✔
1266
            dom.rcp.values->sdiv(dst.svalue, dst.uvalue, src.svalue, finite_width);
28✔
1267
            dom.rcp.havoc_offsets(bin.dst);
28✔
1268
            break;
14✔
1269
        case Bin::Op::SMOD:
42✔
1270
            dom.rcp.values->srem(dst.svalue, dst.uvalue, src.svalue, finite_width);
42✔
1271
            dom.rcp.havoc_offsets(bin.dst);
42✔
1272
            break;
21✔
1273
        case Bin::Op::OR:
5,760✔
1274
            dom.rcp.values->bitwise_or(dst.svalue, dst.uvalue, src.uvalue, finite_width);
5,760✔
1275
            dom.rcp.havoc_offsets(bin.dst);
5,760✔
1276
            break;
2,880✔
1277
        case Bin::Op::AND:
504✔
1278
            dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, src.uvalue, finite_width);
504✔
1279
            dom.rcp.havoc_offsets(bin.dst);
504✔
1280
            break;
252✔
1281
        case Bin::Op::LSH:
536✔
1282
            if (dom.rcp.types.type_is_number(src_reg)) {
536✔
1283
                auto src_interval = dom.rcp.values.eval_interval(src.uvalue);
536✔
1284
                if (std::optional<Number> sn = src_interval.singleton()) {
536✔
1285
                    // truncate to uint64?
1286
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
54✔
1287
                    if (imm <= std::numeric_limits<int32_t>::max()) {
54✔
1288
                        if (!bin.is64) {
54✔
1289
                            // Use only the low 32 bits of the value.
1290
                            dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
45✔
1291
                        }
1292
                        shl(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
54✔
1293
                        break;
54✔
1294
                    }
1295
                }
536✔
1296
            }
536✔
1297
            dom.rcp.values->shl_overflow(dst.svalue, dst.uvalue, src.uvalue);
482✔
1298
            dom.rcp.havoc_offsets(bin.dst);
482✔
1299
            break;
241✔
1300
        case Bin::Op::RSH:
82✔
1301
            if (dom.rcp.types.type_is_number(src_reg)) {
82✔
1302
                auto src_interval = dom.rcp.values.eval_interval(src.uvalue);
82✔
1303
                if (std::optional<Number> sn = src_interval.singleton()) {
82✔
1304
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
38✔
1305
                    if (imm <= std::numeric_limits<int32_t>::max()) {
38✔
1306
                        if (!bin.is64) {
38✔
1307
                            // Use only the low 32 bits of the value.
1308
                            dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
27✔
1309
                        }
1310
                        lshr(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
38✔
1311
                        break;
38✔
1312
                    }
1313
                }
82✔
1314
            }
82✔
1315
            dom.rcp.havoc_register_except_type(bin.dst);
44✔
1316
            break;
22✔
1317
        case Bin::Op::ARSH:
36✔
1318
            if (dom.rcp.types.type_is_number(src_reg)) {
36✔
1319
                ashr(bin.dst, src.svalue, finite_width);
36✔
1320
                break;
36✔
1321
            }
1322
            dom.rcp.havoc_register_except_type(bin.dst);
×
1323
            break;
1324
        case Bin::Op::XOR:
502✔
1325
            dom.rcp.values->bitwise_xor(dst.svalue, dst.uvalue, src.uvalue, finite_width);
502✔
1326
            dom.rcp.havoc_offsets(bin.dst);
502✔
1327
            break;
251✔
1328
        case Bin::Op::MOVSX8:
2,224✔
1329
        case Bin::Op::MOVSX16:
1,112✔
1330
        case Bin::Op::MOVSX32: {
1,112✔
1331
            const int source_width = _movsx_bits(bin.op);
2,224✔
1332
            // Keep relational information if operation is a no-op.
1333
            if (dst.svalue == src.svalue &&
3,311✔
1334
                dom.rcp.values.eval_interval(dst.svalue) <= Interval::signed_int(source_width)) {
5,485✔
1335
                return;
11✔
1336
            }
1337
            if (dom.rcp.types.type_is_number(src_reg)) {
2,202✔
1338
                dom.rcp.havoc_offsets(bin.dst);
2,202✔
1339
                dom.rcp.assign_type(bin.dst, T_NUM);
2,202✔
1340
                dom.rcp.values->sign_extend(dst.svalue, dst.uvalue, src.svalue, finite_width, source_width);
2,202✔
1341
                break;
2,202✔
1342
            }
1343
            dom.rcp.havoc_register(bin.dst);
×
1344
            break;
1345
        }
1346
        case Bin::Op::MOV:
27,358✔
1347
            // Keep relational information if operation is a no-op.
1348
            if (bin.is64 || dom.rcp.types.type_is_number(src_reg)) {
27,358✔
1349
                if (bin.dst != src_reg) {
46,721✔
1350
                    // the 32bit case is handled below
1351
                    dom.rcp.assign(bin.dst, src_reg);
26,020✔
1352
                }
1353
            } else {
1354
                // If src is not a number, we don't know how to truncate a pointer.
1355
                dom.rcp.havoc_register(bin.dst);
×
1356
            }
1357
            break;
13,679✔
1358
        }
1359
    }
1360
    if (!bin.is64) {
100,820✔
1361
        dom.rcp.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
25,500✔
1362
    }
1363
}
1364

1365
void EbpfTransformer::initialize_loop_counter(const Label& label) {
32✔
1366
    dom.rcp.values.assign(variable_registry->loop_counter(to_string(label)), 0);
48✔
1367
}
32✔
1368

1369
// Bump the loop counter associated with `ins.name` by one.
// On an unreachable (bottom) state there is nothing to track, so do nothing.
void EbpfTransformer::operator()(const IncrementLoopCounter& ins) {
    if (!dom.is_bottom()) {
        dom.rcp.values->add(variable_registry->loop_counter(to_string(ins.name)), 1);
    }
}
1376

1377
// Free-function entry point: zero-initialize the loop counter for `label`
// on the given abstract domain, delegating to EbpfTransformer.
void ebpf_domain_initialize_loop_counter(EbpfDomain& dom, const Label& label) {
    EbpfTransformer transformer{dom};
    transformer.initialize_loop_counter(label);
}
1380
} // namespace prevail
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc