• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

Alan-Jowett / ebpf-verifier / 22160684311

18 Feb 2026 09:20PM UTC coverage: 88.256% (+1.1%) from 87.142%
22160684311

push

github

elazarg
lint

Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

78 of 82 new or added lines in 6 files covered. (95.12%)

402 existing lines in 18 files now uncovered.

10731 of 12159 relevant lines covered (88.26%)

837642.51 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.16
/src/crab/ebpf_transformer.cpp
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3

4
// This file is eBPF-specific, not derived from CRAB.
5

6
#include <bitset>
7
#include <cassert>
8
#include <optional>
9
#include <utility>
10
#include <vector>
11

12
#include "boost/endian/conversion.hpp"
13

14
#include "arith/dsl_syntax.hpp"
15
#include "config.hpp"
16
#include "crab/array_domain.hpp"
17
#include "crab/ebpf_domain.hpp"
18
#include "crab/var_registry.hpp"
19
#include "crab_utils/num_safety.hpp"
20
#include "ir/unmarshal.hpp"
21
#include "platform.hpp"
22
#include "string_constraints.hpp"
23

24
namespace prevail {
25
/// Abstract transformer for eBPF instructions: each operator() mutates the
/// wrapped EbpfDomain to reflect the effect of one instruction.
class EbpfTransformer final {
    EbpfDomain& dom;
    // shorthands:
    ArrayDomain& stack;

  public:
    explicit EbpfTransformer(EbpfDomain& _dom) : dom(_dom), stack(_dom.stack) {}

    // abstract transformers
    void operator()(const Assume&);

    void operator()(const Atomic&);

    void operator()(const Bin&);

    void operator()(const Call&);

    void operator()(const CallLocal&);

    void operator()(const Callx&);
    void operator()(const CallBtf&);

    void operator()(const Exit&);

    void operator()(const IncrementLoopCounter&);

    void operator()(const Jmp&) const;

    void operator()(const LoadMapFd&);

    void operator()(const LoadMapAddress&);
    void operator()(const LoadPseudo&);

    void operator()(const Mem&);

    void operator()(const Packet&);

    void operator()(const Un&);

    void operator()(const Undefined&);

    void initialize_loop_counter(const Label& label);

  private:
    /// Havoc the eBPF caller-saved registers r1-r5 (e.g. across a helper call).
    void scratch_caller_saved_registers();

    /// Snapshot r6-r9 into stack-frame-scoped variables named by `prefix`.
    void save_callee_saved_registers(const std::string& prefix);

    /// Restore r6-r9 from the stack-frame-scoped variables named by `prefix`.
    void restore_callee_saved_registers(const std::string& prefix);

    /// Forget everything stored in the exiting subprogram's stack frame.
    void havoc_subprogram_stack(const std::string& prefix);

    void forget_packet_pointers();

    void do_load_mapfd(const Reg& dst_reg, int mapfd, bool maybe_null);

    void do_load_map_address(const Reg& dst_reg, int mapfd, int32_t offset);

    void assign_valid_ptr(const Reg& dst_reg, bool maybe_null);

    static void recompute_stack_numeric_size(TypeToNumDomain& state, ArrayDomain& stack, const Reg& reg);

    static void recompute_stack_numeric_size(TypeToNumDomain& state, const ArrayDomain& stack, Variable type_variable);

    static void do_load_stack(TypeToNumDomain& state, ArrayDomain& stack, const Reg& target_reg,
                              const LinearExpression& addr, int width, const Reg& src_reg);

    void do_load(const Mem& b, const Reg& target_reg);

    static void do_store_stack(TypeToNumDomain& state, ArrayDomain& stack, const LinearExpression& symb_addr,
                               int exact_width, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                               const std::optional<Reg>& opt_val_reg);

    void do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                      const std::optional<Reg>& opt_val_reg);

    void add(const Reg& dst_reg, int imm, int finite_width);

    void shl(const Reg& dst_reg, int imm, int finite_width);

    void lshr(const Reg& dst_reg, int imm, int finite_width);

    void ashr(const Reg& dst_reg, const LinearExpression& right_svalue, int finite_width);
}; // end EbpfTransformer
110

111
/// Apply the abstract transformer for `ins` to `inv` in place.
/// A non-Assume instruction that drives a feasible state to bottom indicates
/// a bug in the transformers, so we fail loudly instead of continuing.
void ebpf_domain_transform(EbpfDomain& inv, const Instruction& ins) {
    // Nothing to do on an infeasible state: bottom stays bottom.
    if (inv.is_bottom()) {
        return;
    }
    const EbpfDomain before = inv; // kept only for the diagnostic below
    std::visit(EbpfTransformer{inv}, ins);
    // Only an Assume may legitimately produce bottom (an infeasible branch).
    if (!inv.is_bottom() || std::holds_alternative<Assume>(ins)) {
        return;
    }
    std::stringstream msg;
    msg << "Bug! pre-invariant:\n"
        << before << "\n followed by instruction: " << ins << "\n"
        << "leads to bottom";
    throw std::logic_error(msg.str());
}
126

127
void EbpfTransformer::scratch_caller_saved_registers() {
10,652✔
128
    for (uint8_t i = R1_ARG; i <= R5_ARG; i++) {
63,912✔
129
        dom.state.havoc_register(Reg{i});
53,260✔
130
    }
131
}
10,652✔
132

133
/// Create variables specific to the new call stack frame that store
/// copies of the states of r6 through r9.
void EbpfTransformer::save_callee_saved_registers(const std::string& prefix) {
    // TODO: define location `stack_frame_var`, and pass to dom.state.assign().
    //       Similarly in restore_callee_saved_registers
    for (const Reg r : {Reg{R6}, Reg{R7}, Reg{R8}, Reg{R9}}) {
        // An uninitialized register has nothing worth saving.
        if (dom.state.is_initialized(r)) {
            // Snapshot the register's type into the frame-scoped type variable.
            const Variable type_var = variable_registry->type_reg(r.v);
            dom.state.assign_type(variable_registry->stack_frame_var(DataKind::types, r.v, prefix), type_var);
            for (const TypeEncoding type : dom.state.iterate_types(r)) {
                // Copy the kinds relevant to this type, plus the numeric views
                // (svalues/uvalues) which exist for every type.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->reg(kind, r.v);
                    const Variable dst_var = variable_registry->stack_frame_var(kind, r.v, prefix);
                    // Only copy constrained values; saving top adds no information.
                    if (!dom.state.values.eval_interval(src_var).is_top()) {
                        dom.state.values.assign(dst_var, src_var);
                    }
                }
            }
        }
    }
}
157

158
/// Restore r6-r9 from the stack-frame-scoped copies made by
/// save_callee_saved_registers, then discard the frame-scoped variables.
void EbpfTransformer::restore_callee_saved_registers(const std::string& prefix) {
    for (uint8_t r = R6; r <= R9; r++) {
        Reg reg{r};
        const Variable type_var = variable_registry->stack_frame_var(DataKind::types, r, prefix);
        // Only registers that were saved (frame type variable initialized) are restored.
        if (dom.state.is_initialized(type_var)) {
            dom.state.assign_type(reg, type_var);
            for (const TypeEncoding type : dom.state.iterate_types(reg)) {
                // Restore the kinds relevant to this type, plus the numeric
                // views (uvalues/svalues) which exist for every type.
                auto kinds = type_to_kinds.at(type);
                kinds.push_back(DataKind::uvalues);
                kinds.push_back(DataKind::svalues);
                for (const DataKind kind : kinds) {
                    const Variable src_var = variable_registry->stack_frame_var(kind, r, prefix);
                    const Variable dst_var = variable_registry->reg(kind, r);
                    if (!dom.state.values.eval_interval(src_var).is_top()) {
                        dom.state.values.assign(dst_var, src_var);
                    } else {
                        // Saved copy carries no information: the register must not
                        // keep any stale value from inside the callee.
                        dom.state.values.havoc(dst_var);
                    }
                    // The frame-scoped copy is dead after restoration.
                    dom.state.values.havoc(src_var);
                }
            }
        }
        dom.state.havoc_type(type_var);
    }
}
183

184
void EbpfTransformer::havoc_subprogram_stack(const std::string& prefix) {
56✔
185
    const Variable r10_stack_offset = reg_pack(R10_STACK_POINTER).stack_offset;
56✔
186
    const auto intv = dom.state.values.eval_interval(r10_stack_offset);
56✔
187
    if (!intv.is_singleton()) {
56✔
188
        return;
32✔
189
    }
190
    const int64_t stack_start = intv.singleton()->cast_to<int64_t>() - EBPF_SUBPROGRAM_STACK_SIZE;
24✔
191
    stack.havoc_type(dom.state.types, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
24✔
192
    for (const DataKind kind : iterate_kinds()) {
264✔
193
        stack.havoc(dom.state.values, kind, Interval{stack_start}, Interval{EBPF_SUBPROGRAM_STACK_SIZE});
240✔
194
    }
24✔
195
}
196

197
// Drop all knowledge tied to packet-typed locations (used when the packet may
// have changed, e.g. after a resizing helper), then re-establish the baseline
// packet invariants.
void EbpfTransformer::forget_packet_pointers() {
    dom.state.havoc_all_locations_having_type(T_PACKET);
    dom.initialize_packet();
}
201

202
/** Linear constraint for a pointer comparison.
203
 */
204
static LinearConstraint assume_cst_offsets_reg(const Condition::Op op, const Variable dst_offset,
1,880✔
205
                                               const Variable src_offset) {
206
    using namespace dsl_syntax;
940✔
207
    using Op = Condition::Op;
940✔
208
    switch (op) {
1,880✔
209
    case Op::EQ: return eq(dst_offset, src_offset);
6✔
210
    case Op::NE: return neq(dst_offset, src_offset);
10✔
211
    case Op::GE: return dst_offset >= src_offset;
6✔
212
    case Op::SGE: return dst_offset >= src_offset; // pointer comparison is unsigned
×
213
    case Op::LE: return dst_offset <= src_offset;
1,392✔
214
    case Op::SLE: return dst_offset <= src_offset; // pointer comparison is unsigned
×
215
    case Op::GT: return dst_offset > src_offset;
1,392✔
216
    case Op::SGT: return dst_offset > src_offset; // pointer comparison is unsigned
×
217
    case Op::SLT: return src_offset > dst_offset;
×
218
    // Note: reverse the test as a workaround strange lookup:
219
    case Op::LT: return src_offset > dst_offset; // FIX unsigned
6✔
220
    default: return dst_offset - dst_offset == 0;
×
221
    }
222
}
223

224
/// Refine the abstract state with the branch condition `s.cond`.
/// Register-register comparisons are dispatched per type (numeric vs pointer);
/// register-immediate comparisons are always numeric.
void EbpfTransformer::operator()(const Assume& s) {
    if (dom.is_bottom()) {
        return;
    }
    const Condition cond = s.cond;
    const auto dst = reg_pack(cond.left);
    if (const auto psrc_reg = std::get_if<Reg>(&cond.right)) {
        const auto src_reg = *psrc_reg;
        const auto src = reg_pack(src_reg);
        // This should have been checked by EbpfChecker
        assert(dom.state.same_type(cond.left, std::get<Reg>(cond.right)));
        dom.state = dom.state.join_over_types(cond.left, [&](TypeToNumDomain& state, const TypeEncoding type) {
            if (type == T_NUM) {
                // Numeric comparison: constrain both signed and unsigned views.
                for (const LinearConstraint& cst :
                     state.values->assume_cst_reg(cond.op, cond.is64, dst.svalue, dst.uvalue, src.svalue, src.uvalue)) {
                    state.values.add_constraint(cst);
                }
            } else {
                // Either pointers to a singleton region,
                // or an equality comparison on map descriptors/pointers to non-singleton locations
                if (const auto dst_offset = get_type_offset_variable(cond.left, type)) {
                    if (const auto src_offset = get_type_offset_variable(src_reg, type)) {
                        state.values.add_constraint(
                            assume_cst_offsets_reg(cond.op, dst_offset.value(), src_offset.value()));
                    }
                }
            }
        });
    } else {
        // Comparison against an immediate: purely numeric.
        const int64_t imm = gsl::narrow_cast<int64_t>(std::get<Imm>(cond.right).v);
        for (const LinearConstraint& cst :
             dom.state.values->assume_cst_imm(cond.op, cond.is64, dst.svalue, dst.uvalue, imm)) {
            dom.state.values.add_constraint(cst);
        }
    }
}
260

261
// Undefined carries no semantics: intentionally a no-op on the abstract state.
void EbpfTransformer::operator()(const Undefined& a) {}
262

263
// Rejected before abstract interpretation by cfg_builder::check_instruction_feature_support.
// Reaching this transformer therefore indicates a front-end bug.
void EbpfTransformer::operator()(const CallBtf&) {
    assert(false && "CallBtf should be rejected before abstract transformation");
}
267

268
// Rejected before abstract interpretation by cfg_builder::check_instruction_feature_support.
// Reaching this transformer therefore indicates a front-end bug.
void EbpfTransformer::operator()(const LoadPseudo&) {
    assert(false && "LoadPseudo should be rejected before abstract transformation");
}
272

273
// Identity helper with the same shape as boost::endian::endian_reverse<T>,
// usable with swap_endianness() when the host byte order already matches:
// the narrowing to T happens at the call site's implicit conversion.
template <class T>
constexpr T truncate(T value) noexcept {
    return value;
}
278

279
/// Unary operation transformer: byte swaps (BE*/LE*/SWAP*) and negation.
void EbpfTransformer::operator()(const Un& stmt) {
    if (dom.is_bottom()) {
        return;
    }
    const auto dst = reg_pack(stmt.dst);
    // Apply `be_or_le` to `v` when the destination is numeric with a known
    // singleton value; otherwise all value and offset information is lost.
    auto swap_endianness = [&](const Variable v, auto be_or_le) {
        if (dom.state.is_in_group(stmt.dst, TS_NUM)) {
            if (const auto n = dom.state.values.eval_interval(v).singleton()) {
                if (n->fits_cast_to<int64_t>()) {
                    dom.state.values.set(v, Interval{be_or_le(n->cast_to<int64_t>())});
                    return;
                }
            }
        }
        dom.state.values.havoc(v);
        dom.state.havoc_offsets(stmt.dst);
    };
    // Swap bytes if needed.  For 64-bit types we need the weights to fit in a
    // signed int64, but for smaller types we don't want sign extension,
    // so we use unsigned which still fits in a signed int64.
    switch (stmt.op) {
    case Un::Op::BE16:
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        } else {
            // Byte order already matches; BE16 only truncates to 16 bits.
            swap_endianness(dst.svalue, truncate<uint16_t>);
            swap_endianness(dst.uvalue, truncate<uint16_t>);
        }
        break;
    case Un::Op::BE32:
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint32_t>);
            swap_endianness(dst.uvalue, truncate<uint32_t>);
        }
        break;
    case Un::Op::BE64:
        if (!thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        }
        // On a big-endian host BE64 is the identity: nothing to do.
        break;
    case Un::Op::LE16:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint16_t>);
            swap_endianness(dst.uvalue, truncate<uint16_t>);
        }
        break;
    case Un::Op::LE32:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        } else {
            swap_endianness(dst.svalue, truncate<uint32_t>);
            swap_endianness(dst.uvalue, truncate<uint32_t>);
        }
        break;
    case Un::Op::LE64:
        if (thread_local_options.big_endian) {
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        }
        // On a little-endian host LE64 is the identity: nothing to do.
        break;
    case Un::Op::SWAP16:
        // Unconditional swaps are endianness-independent.
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
        break;
    case Un::Op::SWAP32:
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
        break;
    case Un::Op::SWAP64:
        swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
        break;
    case Un::Op::NEG:
        // Arithmetic negation in the finite 32/64-bit width; the result is a
        // number, so any pointer offsets on dst become meaningless.
        dom.state.values->neg(dst.svalue, dst.uvalue, stmt.is64 ? 64 : 32);
        dom.state.havoc_offsets(stmt.dst);
        break;
    }
}
366

367
/// Exit transformer. Leaving the outermost program needs no cleanup; a
/// subprogram exit (non-empty stack frame prefix) tears down its frame and
/// restores the caller's registers and stack pointer.
void EbpfTransformer::operator()(const Exit& a) {
    if (dom.is_bottom() || a.stack_frame_prefix.empty()) {
        return;
    }
    // Clean up any state for the current stack frame.
    const std::string& frame = a.stack_frame_prefix;
    havoc_subprogram_stack(frame);
    restore_callee_saved_registers(frame);

    // Restore r10 to the caller's frame.
    constexpr Reg r10{R10_STACK_POINTER};
    add(r10, EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
383

384
/// Jump transformer: intentionally empty.
void EbpfTransformer::operator()(const Jmp&) const {
    // This is a NOP. It only exists to hold the jump preconditions.
}
387

388
/// Legacy packet-access instruction: like a helper call, it leaves a number
/// in r0 and clobbers the caller-saved registers r1-r5.
void EbpfTransformer::operator()(const Packet& a) {
    if (dom.is_bottom()) {
        return;
    }
    constexpr Reg r0{R0_RETURN_VALUE};
    dom.state.havoc_register_except_type(r0);
    dom.state.assign_type(r0, T_NUM);
    scratch_caller_saved_registers();
}
397

398
/// Load `width` bytes from the stack at `symb_addr` into `target_reg`.
/// The type comes either from the source register's known numeric run
/// (stack_numeric_size) or from the stack's type cells; values are read from
/// the stack's per-kind cells.
void EbpfTransformer::do_load_stack(TypeToNumDomain& state, ArrayDomain& stack, const Reg& target_reg,
                                    const LinearExpression& symb_addr, const int width, const Reg& src_reg) {
    const Interval addr = state.values.eval_interval(symb_addr);
    using namespace dsl_syntax;
    // If the whole access falls inside the known-numeric run below src_reg's
    // stack offset, the result is a number regardless of the stack cells.
    if (state.values.entail(width <= reg_pack(src_reg).stack_numeric_size)) {
        state.assign_type(target_reg, T_NUM);
    } else {
        state.assign_type(target_reg, stack.load_type(addr, width));
        if (!state.is_initialized(target_reg)) {
            // We don't know what we loaded, so just havoc the destination register.
            state.havoc_register(target_reg);
            return;
        }
    }

    const RegPack& target = reg_pack(target_reg);
    if (width == 1 || width == 2 || width == 4 || width == 8) {
        // Use the addr before we havoc the destination register since we might be getting the
        // addr from that same register.
        const std::optional<LinearExpression> sresult = stack.load(state.values, DataKind::svalues, addr, width);
        const std::optional<LinearExpression> uresult = stack.load(state.values, DataKind::uvalues, addr, width);
        state.havoc_register_except_type(target_reg);
        state.values.assign(target.svalue, sresult);
        state.values.assign(target.uvalue, uresult);
        // Also restore the kind variables relevant to each possible type of
        // the loaded value (e.g. offsets for pointer types).
        for (const TypeEncoding type : state.iterate_types(target_reg)) {
            for (const auto& kind : type_to_kinds.at(type)) {
                const Variable dst_var = variable_registry->reg(kind, target_reg.v);
                state.values.assign(dst_var, stack.load(state.values, kind, addr, width));
            }
        }
    } else {
        // Unsupported width: keep only the type, lose the value.
        state.havoc_register_except_type(target_reg);
    }
}
432

433
/// Load from the context object. Loads at the context descriptor's data/end/
/// meta slots (with the pointer-sized width) yield packet pointers; all other
/// context loads yield plain numbers.
static void do_load_ctx(TypeToNumDomain& state, const Reg& target_reg, const LinearExpression& addr_vague,
                        const int width) {
    using namespace dsl_syntax;
    if (state.values.is_bottom()) {
        return;
    }

    const ebpf_context_descriptor_t* desc = thread_local_program_info->type.context_descriptor;

    const RegPack& target = reg_pack(target_reg);

    // A negative `end` means this program type exposes no packet pointers in
    // its context, so every context load is numeric.
    if (desc->end < 0) {
        state.havoc_register(target_reg);
        state.assign_type(target_reg, T_NUM);
        return;
    }

    const Interval interval = state.values.eval_interval(addr_vague);
    const std::optional<Number> maybe_addr = interval.singleton();
    state.havoc_register(target_reg);

    // Could this access read one of the pointer-bearing slots?
    const bool may_touch_ptr =
        interval.contains(desc->data) || interval.contains(desc->meta) || interval.contains(desc->end);

    if (!maybe_addr) {
        // Address not exactly known: type is unknown if a pointer slot might
        // be touched, numeric otherwise.
        if (may_touch_ptr) {
            state.havoc_type(target_reg);
        } else {
            state.assign_type(target_reg, T_NUM);
        }
        return;
    }

    const Number& addr = *maybe_addr;

    // We use offsets for packet data, data_end, and meta during verification,
    // but at runtime they will be 64-bit pointers.  We can use the offset values
    // for verification like we use map_fd's as a proxy for maps which
    // at runtime are actually 64-bit memory pointers.
    const int offset_width = desc->end - desc->data;
    if (addr == desc->data) {
        if (width == offset_width) {
            // data pointer: offset 0 from the packet start.
            state.values.assign(target.packet_offset, 0);
        }
    } else if (addr == desc->end) {
        if (width == offset_width) {
            // data_end pointer: offset equals the (symbolic) packet size.
            state.values.assign(target.packet_offset, variable_registry->packet_size());
            // EXPERIMENTAL: Explicit upper bound since packet_size is min_only.
            // This preserves the relational constraint (packet_offset <= packet_size)
            // while ensuring comparison checks have a concrete upper bound.
            state.values.add_constraint(target.packet_offset < MAX_PACKET_SIZE);
        }
    } else if (addr == desc->meta) {
        if (width == offset_width) {
            state.values.assign(target.packet_offset, variable_registry->meta_offset());
        }
    } else {
        // Some other context field: numeric unless a pointer slot might alias.
        if (may_touch_ptr) {
            state.havoc_type(target_reg);
        } else {
            state.assign_type(target_reg, T_NUM);
        }
        return;
    }
    if (width == offset_width) {
        state.assign_type(target_reg, T_PACKET);
        // NOTE(review): 4098 appears to be the minimum valid runtime pointer
        // value used for packet pointers here — confirm against the domain's
        // pointer-range conventions.
        state.values.add_constraint(4098 <= target.svalue);
        state.values.add_constraint(target.svalue <= PTR_MAX);
    }
}
503

504
/// Load from packet or shared memory: the contents are unknown, so the result
/// is always a number; narrow loads get the tightest width-derived range.
static void do_load_packet_or_shared(TypeToNumDomain& state, const Reg& target_reg, const int width,
                                     const bool is_signed) {
    if (state.values.is_bottom()) {
        return;
    }
    const RegPack& target = reg_pack(target_reg);

    state.havoc_register(target_reg);
    state.assign_type(target_reg, T_NUM);

    // Small copies can be range-limited and useful for later arithmetic.
    const int bits = width * 8;
    if (is_signed && (width == 1 || width == 2 || width == 4)) {
        // Sign-extending load: signed range for svalue, unsigned for uvalue.
        state.values.set(target.svalue, Interval::signed_int(bits));
        state.values.set(target.uvalue, Interval::unsigned_int(bits));
    } else if (width == 1 || width == 2) {
        // Zero-extending narrow load: both views share the unsigned range.
        const Interval bounds = Interval::unsigned_int(bits);
        state.values.set(target.svalue, bounds);
        state.values.set(target.uvalue, bounds);
    }
}
524

525
void EbpfTransformer::do_load(const Mem& b, const Reg& target_reg) {
27,058✔
526
    using namespace dsl_syntax;
13,529✔
527

528
    const auto mem_reg = reg_pack(b.access.basereg);
27,058✔
529
    const int width = b.access.width;
27,058✔
530
    const int offset = b.access.offset;
27,058✔
531

532
    if (b.access.basereg.v == R10_STACK_POINTER) {
27,058✔
533
        const LinearExpression addr = mem_reg.stack_offset + offset;
17,493✔
534
        do_load_stack(dom.state, stack, target_reg, addr, width, b.access.basereg);
11,662✔
535
        return;
11,662✔
536
    }
11,662✔
537
    dom.state = dom.state.join_over_types(b.access.basereg, [&](TypeToNumDomain& state, TypeEncoding type) {
30,792✔
538
        switch (type) {
15,568✔
539
        case T_UNINIT: return;
540
        case T_MAP: return;
541
        case T_MAP_PROGRAMS: return;
542
        case T_NUM: return;
543
        case T_CTX: {
3,772✔
544
            LinearExpression addr = mem_reg.ctx_offset + offset;
5,658✔
545
            do_load_ctx(state, target_reg, addr, width);
3,772✔
546
            break;
3,772✔
547
        }
3,772✔
548
        case T_STACK: {
524✔
549
            LinearExpression addr = mem_reg.stack_offset + offset;
786✔
550
            do_load_stack(state, stack, target_reg, addr, width, b.access.basereg);
524✔
551
            break;
524✔
552
        }
524✔
553
        case T_PACKET: {
3,332✔
554
            LinearExpression addr = mem_reg.packet_offset + offset;
4,998✔
555
            do_load_packet_or_shared(state, target_reg, width, b.is_signed);
3,332✔
556
            break;
3,332✔
557
        }
3,332✔
558
        case T_SHARED: {
7,940✔
559
            LinearExpression addr = mem_reg.shared_offset + offset;
11,910✔
560
            do_load_packet_or_shared(state, target_reg, width, b.is_signed);
7,940✔
561
            break;
7,940✔
562
        }
7,940✔
563
        }
564
    });
15,396✔
565
}
566

567
/// Store `exact_width` bytes at stack address `symb_addr`. `opt_val_reg` is
/// the value register (nullopt for an immediate store); `val_svalue` /
/// `val_uvalue` are the signed/unsigned views of the stored value. Updates the
/// stack's type and per-kind value cells, then refreshes stack_numeric_size
/// for every stack pointer whose numeric run may overlap the written bytes.
void EbpfTransformer::do_store_stack(TypeToNumDomain& state, ArrayDomain& stack, const LinearExpression& symb_addr,
                                     const int exact_width, const LinearExpression& val_svalue,
                                     const LinearExpression& val_uvalue, const std::optional<Reg>& opt_val_reg) {
    const Interval addr = state.values.eval_interval(symb_addr);
    const Interval width{exact_width};
    // no aliasing of val - we don't move from stack to stack, so we can just havoc first
    stack.havoc_type(state.types, addr, width);
    for (const DataKind kind : iterate_kinds()) {
        // svalues/uvalues are handled below, depending on the value stored.
        if (kind == DataKind::svalues || kind == DataKind::uvalues) {
            continue;
        }
        stack.havoc(state.values, kind, addr, width);
    }
    bool must_be_num = false;
    if (opt_val_reg && !state.is_initialized(*opt_val_reg)) {
        // Storing an uninitialized register: the cells become unknown.
        stack.havoc(state.values, DataKind::svalues, addr, width);
        stack.havoc(state.values, DataKind::uvalues, addr, width);
    } else {
        // opt_val_reg is unset when storing an immediate value.
        must_be_num = !opt_val_reg || state.is_in_group(*opt_val_reg, TS_NUM);
        const LinearExpression val_type =
            must_be_num ? LinearExpression{T_NUM} : variable_registry->type_reg(opt_val_reg->v);
        state.assign_type(stack.store_type(state.types, addr, width, must_be_num), val_type);

        if (exact_width == 8) {
            // Full-word store: record both numeric views exactly.
            stack.havoc(state.values, DataKind::svalues, addr, width);
            stack.havoc(state.values, DataKind::uvalues, addr, width);
            state.values.assign(stack.store(state.values, DataKind::svalues, addr, width), val_svalue);
            state.values.assign(stack.store(state.values, DataKind::uvalues, addr, width), val_uvalue);

            if (!must_be_num) {
                // A (possible) pointer: copy its per-type kind variables
                // (e.g. offsets) into the stack cells too.
                for (TypeEncoding type : state.iterate_types(*opt_val_reg)) {
                    for (const DataKind kind : type_to_kinds.at(type)) {
                        const Variable src_var = variable_registry->reg(kind, opt_val_reg->v);
                        state.values.assign(stack.store(state.values, kind, addr, width), src_var);
                    }
                }
            }
        } else if ((exact_width == 1 || exact_width == 2 || exact_width == 4) && must_be_num) {
            // Keep track of numbers on the stack that might be used as array indices.
            if (const auto stack_svalue = stack.store(state.values, DataKind::svalues, addr, width)) {
                state.values.assign(stack_svalue, val_svalue);
                // Wrap the stored value into the signed range of the narrow width.
                state.values->overflow_bounds(*stack_svalue, exact_width * 8, true);
            } else {
                stack.havoc(state.values, DataKind::svalues, addr, width);
            }
            if (const auto stack_uvalue = stack.store(state.values, DataKind::uvalues, addr, width)) {
                state.values.assign(stack_uvalue, val_uvalue);
                // Wrap the stored value into the unsigned range of the narrow width.
                state.values->overflow_bounds(*stack_uvalue, exact_width * 8, false);
            } else {
                stack.havoc(state.values, DataKind::uvalues, addr, width);
            }
        } else {
            // Narrow store of a non-number (or unusual width): value unknown.
            stack.havoc(state.values, DataKind::svalues, addr, width);
            stack.havoc(state.values, DataKind::uvalues, addr, width);
        }
    }

    // Update stack_numeric_size for any stack type variables.
    // stack_numeric_size holds the number of continuous bytes starting from stack_offset that are known to be numeric.
    for (const Variable type_variable : variable_registry->get_type_variables()) {
        if (!state.is_initialized(type_variable) || !state.may_have_type(type_variable, T_STACK)) {
            continue;
        }
        const Variable stack_offset_variable = variable_registry->kind_var(DataKind::stack_offsets, type_variable);
        const Variable stack_numeric_size_variable =
            variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable);

        using namespace dsl_syntax;
        // See if the variable's numeric interval overlaps with changed bytes.
        if (state.values.intersect(
                dsl_syntax::operator<=(symb_addr, stack_offset_variable + stack_numeric_size_variable)) &&
            state.values.intersect(dsl_syntax::operator>=(symb_addr + exact_width, stack_offset_variable))) {
            if (!must_be_num) {
                // A non-numeric store may break the numeric run; recompute from scratch.
                state.values.havoc(stack_numeric_size_variable);
            }
            recompute_stack_numeric_size(state, stack, type_variable);
        }
    }
}
647

648
void EbpfTransformer::operator()(const Mem& b) {
    // Abstract transformer for a load/store instruction.
    if (dom.is_bottom()) {
        return;
    }
    const auto preg = std::get_if<Reg>(&b.value);
    if (preg == nullptr) {
        // Store of an immediate: pass it as both the signed and unsigned view.
        const uint64_t imm = std::get<Imm>(b.value).v;
        do_mem_store(b, to_signed(imm), imm, {});
        return;
    }
    if (!b.is_load) {
        // Register store: forward both value views of the source register.
        const auto data = reg_pack(*preg);
        do_mem_store(b, data.svalue, data.uvalue, *preg);
        return;
    }
    do_load(b, *preg);
    if (!b.is_signed) {
        return;
    }
    // Sign-extending load: model the extension as a MOVSX of the loaded value.
    Bin::Op sx_op{};
    // MEMSX decode only allows widths 1/2/4. Programmatic Mem construction paths
    // (for example from Atomic lowering) do not set is_signed=true.
    switch (b.access.width) {
    case 1: sx_op = Bin::Op::MOVSX8; break;
    case 2: sx_op = Bin::Op::MOVSX16; break;
    case 4: sx_op = Bin::Op::MOVSX32; break;
    default: CRAB_ERROR("unexpected MEMSX width", b.access.width);
    }
    (*this)(Bin{.op = sx_op, .dst = *preg, .v = *preg, .is64 = true, .lddw = false});
}
676

677
// Abstract transformer for a memory store of `val_svalue`/`val_uvalue`
// (the signed/unsigned views of the stored value) through b's base register.
// Only stack stores are tracked; stores through other pointer types are no-ops.
// `opt_val_reg` is the source register when the value comes from one (empty for
// an immediate store), used downstream to propagate type information.
void EbpfTransformer::do_mem_store(const Mem& b, const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
                                   const std::optional<Reg>& opt_val_reg) {
    if (dom.is_bottom()) {
        return;
    }
    const int width = b.access.width;
    const Number offset{b.access.offset};
    if (b.access.basereg.v == R10_STACK_POINTER) {
        // Fast path: storing directly through r10. If the current frame's stack
        // offset is a known constant, we can use a concrete address.
        const auto r10_stack_offset = reg_pack(b.access.basereg).stack_offset;
        const auto r10_interval = dom.state.values.eval_interval(r10_stack_offset);
        if (r10_interval.is_singleton()) {
            const int32_t stack_offset = r10_interval.singleton()->cast_to<int32_t>();
            const Number base_addr{stack_offset};
            do_store_stack(dom.state, stack, base_addr + offset, width, val_svalue, val_uvalue, opt_val_reg);
            return;
        }
    }
    // General path: the base register may have several possible types; handle
    // each type separately and join the results.
    dom.state = dom.state.join_over_types(b.access.basereg, [&](TypeToNumDomain& state, const TypeEncoding type) {
        if (type == T_STACK) {
            // Symbolic stack address: base register's stack offset plus the
            // instruction's displacement.
            const auto base_addr = LinearExpression(reg_pack(b.access.basereg).stack_offset);
            do_store_stack(state, stack, dsl_syntax::operator+(base_addr, offset), width, val_svalue, val_uvalue,
                           opt_val_reg);
        }
        // do nothing for any other type
    });
}
703

704
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
705
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
static Bin atomic_to_bin(const Atomic& a) {
    // Map the atomic opcode to the equivalent ALU opcode; XCHG/CMPXCHG both
    // reduce to a plain move of the source value.
    const Bin::Op alu_op = [&a] {
        switch (a.op) {
        case Atomic::Op::ADD: return Bin::Op::ADD;
        case Atomic::Op::OR: return Bin::Op::OR;
        case Atomic::Op::AND: return Bin::Op::AND;
        case Atomic::Op::XOR: return Bin::Op::XOR;
        case Atomic::Op::XCHG:
        case Atomic::Op::CMPXCHG: return Bin::Op::MOV;
        default: throw std::exception();
        }
    }();
    // The result is computed into the R11 scratch pseudo-register; the
    // operation is 64-bit exactly when the access width is 8 bytes.
    return Bin{.op = alu_op,
               .dst = Reg{R11_ATOMIC_SCRATCH},
               .v = a.valreg,
               .is64 = a.access.width == sizeof(uint64_t),
               .lddw = false};
}
718

719
// Abstract transformer for an atomic read-modify-write instruction.
// The atomic is lowered into a sequence of Mem/Bin sub-instructions acting
// through the R11 scratch pseudo-register; only stack-backed targets keep
// their stored values tracked.
void EbpfTransformer::operator()(const Atomic& a) {
    if (dom.is_bottom()) {
        return;
    }
    // Preconditions (enforced elsewhere by assertions): base must be a pointer
    // and the operand a number; otherwise do nothing.
    if (!dom.state.is_in_group(a.access.basereg, TS_POINTER) || !dom.state.is_in_group(a.valreg, TS_NUM)) {
        return;
    }
    if (!dom.state.may_have_type(a.access.basereg, T_STACK)) {
        // Shared memory regions are volatile so we can just havoc
        // any register that will be updated.
        if (a.op == Atomic::Op::CMPXCHG) {
            // CMPXCHG writes the old memory value into r0.
            dom.state.havoc_register_except_type(Reg{R0_RETURN_VALUE});
        } else if (a.fetch) {
            // FETCH variants write the old memory value into the source register.
            dom.state.havoc_register_except_type(a.valreg);
        }
        return;
    }

    // Fetch the current value into the R11 pseudo-register.
    constexpr Reg r11{R11_ATOMIC_SCRATCH};
    (*this)(Mem{.access = a.access, .value = r11, .is_load = true});

    // Compute the new value in R11.
    (*this)(atomic_to_bin(a));

    if (a.op == Atomic::Op::CMPXCHG) {
        // For CMPXCHG, store the original value in r0.
        (*this)(Mem{.access = a.access, .value = Reg{R0_RETURN_VALUE}, .is_load = true});

        // For the destination, there are 3 possibilities:
        // 1) dst.value == r0.value : set R11 to valreg
        // 2) dst.value != r0.value : don't modify R11
        // 3) dst.value may or may not == r0.value : set R11 to the union of R11 and valreg
        // For now we just havoc the value of R11.
        dom.state.havoc_register_except_type(r11);
    } else if (a.fetch) {
        // For other FETCH operations, store the original value in the src register.
        (*this)(Mem{.access = a.access, .value = a.valreg, .is_load = true});
    }

    // Store the new value back in the original shared memory location.
    // Note that do_mem_store() currently doesn't track shared memory values,
    // but stack memory values are tracked and are legal here.
    (*this)(Mem{.access = a.access, .value = r11, .is_load = false});

    // Clear the R11 pseudo-register.
    dom.state.havoc_register(r11);
}
767

768
// Abstract transformer for a helper-function call.
// Models the call's side effects on argument memory regions, then assigns the
// post-call state of r0 and scratches the caller-saved registers.
void EbpfTransformer::operator()(const Call& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    // Remember which register (if any) carried a map fd, so a map-lookup call
    // can later resolve the value type of the returned pointer.
    std::optional<Reg> maybe_fd_reg{};
    for (ArgSingle param : call.singles) {
        switch (param.kind) {
        case ArgSingle::Kind::MAP_FD: maybe_fd_reg = param.reg; break;
        case ArgSingle::Kind::ANYTHING:
        case ArgSingle::Kind::MAP_FD_PROGRAMS:
        case ArgSingle::Kind::PTR_TO_MAP_KEY:
        case ArgSingle::Kind::PTR_TO_MAP_VALUE:
        case ArgSingle::Kind::PTR_TO_CTX:
            // Do nothing. We don't track the content of relevant memory regions
            break;
        case ArgSingle::Kind::PTR_TO_STACK:
            // Do nothing; the stack is passed as context, not to be modified.
            break;
        }
    }
    for (ArgPair param : call.pairs) {
        switch (param.kind) {
        case ArgPair::Kind::PTR_TO_READABLE_MEM:
            // Do nothing. No side effect allowed.
            break;

        case ArgPair::Kind::PTR_TO_WRITABLE_MEM: {
            // A writable (ptr, size) pair: the helper may overwrite the region,
            // so any tracked stack contents in it must be forgotten.
            bool store_numbers = true;
            auto variable = dom.state.get_type_offset_variable(param.mem);
            if (!variable.has_value()) {
                // checked by the checker
                break;
            }
            Interval addr = dom.state.values.eval_interval(variable.value());
            Interval width = dom.state.values.eval_interval(reg_pack(param.size).svalue);

            dom.state = dom.state.join_over_types(param.mem, [&](TypeToNumDomain& state, const TypeEncoding type) {
                if (type == T_STACK) {
                    // Pointer to a memory region that the called function may change,
                    // so we must havoc.
                    for (const DataKind kind : iterate_kinds()) {
                        stack.havoc(state.values, kind, addr, width);
                    }
                } else {
                    store_numbers = false;
                }
            });
            if (store_numbers) {
                // Functions are not allowed to write sensitive data,
                // and initialization is guaranteed
                stack.store_numbers(addr, width);
            }
        }
        }
    }

    // r0 is overwritten by the call, so its stack-size bookkeeping is stale.
    constexpr Reg r0_reg{R0_RETURN_VALUE};
    const auto r0_pack = reg_pack(r0_reg);
    dom.state.values.havoc(r0_pack.stack_numeric_size);
    if (call.is_map_lookup) {
        // This is the only way to get a null pointer
        if (maybe_fd_reg) {
            if (const auto map_type = dom.get_map_type(*maybe_fd_reg)) {
                if (thread_local_program_info->platform->get_map_type(*map_type).value_type == EbpfMapValueType::MAP) {
                    // Map-in-map: the lookup yields an fd of the inner map.
                    if (const auto inner_map_fd = dom.get_map_inner_map_fd(*maybe_fd_reg)) {
                        do_load_mapfd(r0_reg, to_signed(*inner_map_fd), true);
                        goto out;
                    }
                } else {
                    // Data map with a known value size: r0 is a (possibly null)
                    // shared pointer into the value region.
                    assign_valid_ptr(r0_reg, true);
                    dom.state.values.assign(r0_pack.shared_offset, 0);
                    dom.state.values.set(r0_pack.shared_region_size, dom.get_map_value_size(*maybe_fd_reg));
                    dom.state.assign_type(r0_reg, T_SHARED);
                }
            }
        }
        // Fallback (also re-applied after the known-map case above): r0 is a
        // possibly-null shared pointer of unknown region size.
        assign_valid_ptr(r0_reg, true);
        dom.state.values.assign(r0_pack.shared_offset, 0);
        dom.state.assign_type(r0_reg, T_SHARED);
    } else {
        // Non-lookup helpers return a plain number in r0.
        dom.state.havoc_register_except_type(r0_reg);
        dom.state.assign_type(r0_reg, T_NUM);
        // dom.state.values.add_constraint(r0_pack.value < 0); for INTEGER_OR_NO_RETURN_IF_SUCCEED.
    }
out:
    scratch_caller_saved_registers();
    if (call.reallocate_packet) {
        // The helper may resize/move the packet; all packet pointers are stale.
        forget_packet_pointers();
    }
}
859

860
// Abstract transformer for a call to a local (bpf-to-bpf) subprogram.
void EbpfTransformer::operator()(const CallLocal& call) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }
    // Spill the callee-saved registers under the new frame's prefix so they
    // can be restored on Exit.
    save_callee_saved_registers(call.stack_frame_prefix);

    // Carve out the subprogram's frame: r10 moves down by the fixed frame size.
    add(Reg{R10_STACK_POINTER}, -EBPF_SUBPROGRAM_STACK_SIZE, 64);
}
871

872
// Abstract transformer for an indirect helper call (callx): the helper id is
// taken from a register. It is only handled when the id is a statically-known
// 32-bit constant; otherwise the instruction is a no-op here.
void EbpfTransformer::operator()(const Callx& callx) {
    using namespace dsl_syntax;
    if (dom.is_bottom()) {
        return;
    }

    // Look up the helper function id.
    const RegPack& func_pack = reg_pack(callx.func);
    const auto singleton = dom.state.values.eval_interval(func_pack.svalue).singleton();
    if (!singleton || !singleton->fits<int32_t>()) {
        // Unknown or out-of-range id: nothing we can model.
        return;
    }
    // We can now process it as if the id was immediate.
    const int32_t helper_id = singleton->cast_to<int32_t>();
    if (!thread_local_program_info->platform->is_helper_usable(helper_id)) {
        return;
    }
    (*this)(make_call(helper_id, *thread_local_program_info->platform));
}
893

894
void EbpfTransformer::do_load_mapfd(const Reg& dst_reg, const int mapfd, const bool maybe_null) {
5,060✔
895
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
5,060✔
896
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
5,060✔
897
    const RegPack& dst = reg_pack(dst_reg);
5,060✔
898
    if (type.value_type == EbpfMapValueType::PROGRAM) {
5,060✔
899
        dom.state.assign_type(dst_reg, T_MAP_PROGRAMS);
546✔
900
        dom.state.values.assign(dst.map_fd_programs, mapfd);
819✔
901
    } else {
902
        dom.state.assign_type(dst_reg, T_MAP);
4,514✔
903
        dom.state.values.assign(dst.map_fd, mapfd);
6,771✔
904
    }
905
    assign_valid_ptr(dst_reg, maybe_null);
5,060✔
906
}
5,060✔
907

908
// Abstract transformer for LoadMapFd: a lddw that yields a (non-null) map fd.
void EbpfTransformer::operator()(const LoadMapFd& ins) {
    if (!dom.is_bottom()) {
        do_load_mapfd(ins.dst, ins.mapfd, false);
    }
}
914

915
void EbpfTransformer::do_load_map_address(const Reg& dst_reg, const int mapfd, const int32_t offset) {
290✔
916
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
290✔
917
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
290✔
918

919
    if (type.value_type == EbpfMapValueType::PROGRAM) {
290✔
920
        throw std::invalid_argument("Cannot load address of program map type - only data maps are supported");
×
921
    }
922

923
    // Set the shared region size and offset for the map.
924
    dom.state.assign_type(dst_reg, T_SHARED);
290✔
925
    const RegPack& dst = reg_pack(dst_reg);
290✔
926
    dom.state.values.assign(dst.shared_offset, offset);
290✔
927
    dom.state.values.assign(dst.shared_region_size, desc.value_size);
290✔
928
    assign_valid_ptr(dst_reg, false);
290✔
929
}
290✔
930

931
// Abstract transformer for LoadMapAddress: a lddw yielding a map value address.
void EbpfTransformer::operator()(const LoadMapAddress& ins) {
    if (!dom.is_bottom()) {
        do_load_map_address(ins.dst, ins.mapfd, ins.offset);
    }
}
937

938
void EbpfTransformer::assign_valid_ptr(const Reg& dst_reg, const bool maybe_null) {
10,580✔
939
    using namespace dsl_syntax;
5,290✔
940
    const RegPack& reg = reg_pack(dst_reg);
10,580✔
941
    dom.state.values.havoc(reg.svalue);
10,580✔
942
    dom.state.values.havoc(reg.uvalue);
10,580✔
943
    if (maybe_null) {
10,580✔
944
        dom.state.values.add_constraint(0 <= reg.svalue);
7,893✔
945
    } else {
946
        dom.state.values.add_constraint(0 < reg.svalue);
7,977✔
947
    }
948
    dom.state.values.add_constraint(reg.svalue <= PTR_MAX);
15,870✔
949
    dom.state.values.assign(reg.uvalue, reg.svalue);
10,580✔
950
}
10,580✔
951

952
// If nothing is known of the stack_numeric_size,
953
// try to recompute the stack_numeric_size.
954
// If nothing is known of the stack_numeric_size,
// try to recompute the stack_numeric_size.
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& state, const ArrayDomain& stack,
                                                   const Variable type_variable) {
    if (!state.may_have_type(type_variable, T_STACK)) {
        return;
    }
    // Ask the array domain how many contiguous numeric bytes start at the
    // variable's stack offset.
    const Variable offset_variable = variable_registry->kind_var(DataKind::stack_offsets, type_variable);
    const int numeric_size = stack.min_all_num_size(state.values, offset_variable);
    if (numeric_size > 0) {
        state.values.assign(variable_registry->kind_var(DataKind::stack_numeric_sizes, type_variable), numeric_size);
    }
}
967

968
// Register-based convenience overload: delegates to the Variable overload
// using the register's type variable.
void EbpfTransformer::recompute_stack_numeric_size(TypeToNumDomain& state, ArrayDomain& stack, const Reg& reg) {
    recompute_stack_numeric_size(state, stack, reg_type(reg));
}
971

972
// Add the immediate `imm` to dst_reg (with overflow semantics at
// `finite_width` bits), and keep the register's pointer-offset and
// stack-numeric-size bookkeeping consistent with the new value.
void EbpfTransformer::add(const Reg& dst_reg, const int imm, const int finite_width) {
    const auto dst = reg_pack(dst_reg);
    dom.state.values->add_overflow(dst.svalue, dst.uvalue, imm, finite_width);
    // If the register is a pointer, its offset moves by the same amount.
    if (const auto offset = dom.state.get_type_offset_variable(dst_reg)) {
        dom.state.values->add(*offset, imm);
        if (imm > 0) {
            // Since the start offset is increasing but
            // the end offset is not, the numeric size decreases.
            dom.state.values->sub(dst.stack_numeric_size, imm);
        } else if (imm < 0) {
            // Moving the start down may expose bytes of unknown content, so
            // the tracked numeric size is no longer valid.
            dom.state.values.havoc(dst.stack_numeric_size);
        }
        recompute_stack_numeric_size(dom.state, stack, dst_reg);
    }
}
987

988
void EbpfTransformer::shl(const Reg& dst_reg, int imm, const int finite_width) {
5,612✔
989
    dom.state.havoc_offsets(dst_reg);
5,612✔
990

991
    // The BPF ISA requires masking the imm.
992
    imm &= finite_width - 1;
5,612✔
993
    const RegPack dst = reg_pack(dst_reg);
5,612✔
994
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
5,612✔
995
        dom.state.values->shl(dst.svalue, dst.uvalue, imm, finite_width);
5,612✔
996
    } else {
997
        dom.state.values.havoc(dst.svalue);
×
998
        dom.state.values.havoc(dst.uvalue);
×
999
    }
1000
}
5,612✔
1001

1002
void EbpfTransformer::lshr(const Reg& dst_reg, int imm, int finite_width) {
2,620✔
1003
    dom.state.havoc_offsets(dst_reg);
2,620✔
1004

1005
    // The BPF ISA requires masking the imm.
1006
    imm &= finite_width - 1;
2,620✔
1007
    const RegPack dst = reg_pack(dst_reg);
2,620✔
1008
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
2,620✔
1009
        dom.state.values->lshr(dst.svalue, dst.uvalue, imm, finite_width);
2,620✔
1010
    } else {
1011
        dom.state.values.havoc(dst.svalue);
×
1012
        dom.state.values.havoc(dst.uvalue);
×
1013
    }
1014
}
2,620✔
1015

1016
void EbpfTransformer::ashr(const Reg& dst_reg, const LinearExpression& right_svalue, const int finite_width) {
772✔
1017
    dom.state.havoc_offsets(dst_reg);
772✔
1018

1019
    const RegPack dst = reg_pack(dst_reg);
772✔
1020
    if (dom.state.is_in_group(dst_reg, TS_NUM)) {
772✔
1021
        dom.state.values->ashr(dst.svalue, dst.uvalue, right_svalue, finite_width);
772✔
1022
    } else {
1023
        dom.state.values.havoc(dst.svalue);
×
1024
        dom.state.values.havoc(dst.uvalue);
×
1025
    }
1026
}
772✔
1027

1028
// Width, in bits, of the source operand for a sign-extending move opcode.
static int _movsx_bits(const Bin::Op op) {
    if (op == Bin::Op::MOVSX8) {
        return 8;
    }
    if (op == Bin::Op::MOVSX16) {
        return 16;
    }
    if (op == Bin::Op::MOVSX32) {
        return 32;
    }
    // Any other opcode is a caller error.
    throw std::exception();
}
1036

1037
void EbpfTransformer::operator()(const Bin& bin) {
101,184✔
1038
    if (dom.is_bottom()) {
101,184✔
1039
        return;
36✔
1040
    }
1041
    using namespace dsl_syntax;
50,592✔
1042

1043
    auto dst = reg_pack(bin.dst);
101,184✔
1044
    int finite_width = bin.is64 ? 64 : 32;
101,184✔
1045

1046
    // TODO: Unusable states and values should be better handled.
1047
    //       Probably by propagating an error state.
1048
    if (!dom.state.is_initialized(bin.dst) &&
132,620✔
1049
        !std::set{Bin::Op::MOV, Bin::Op::MOVSX8, Bin::Op::MOVSX16, Bin::Op::MOVSX32}.contains(bin.op)) {
179,774✔
1050
        dom.state.havoc_register(bin.dst);
×
1051
        return;
×
1052
    }
1053
    if (auto pimm = std::get_if<Imm>(&bin.v)) {
101,184✔
1054
        // dst += K
1055
        int64_t imm;
30,472✔
1056
        if (bin.is64) {
60,944✔
1057
            // Use the full signed value.
1058
            imm = to_signed(pimm->v);
50,644✔
1059
        } else {
1060
            // Use only the low 32 bits of the value.
1061
            imm = gsl::narrow_cast<int32_t>(pimm->v);
10,300✔
1062
            // If this is a 32-bit operation and the destination is not proven a number, forget the register.
1063
            if (dom.state.is_in_group(bin.dst, TS_NUM)) {
10,300✔
1064
                // Safe to zero-extend the low 32 bits; even if it's also bin.src, only the 32-bit value is used.
1065
                dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
7,550✔
1066
            } else {
1067
                dom.state.havoc_register(bin.dst);
2,750✔
1068
            }
1069
        }
1070
        switch (bin.op) {
60,944✔
1071
        case Bin::Op::MOV:
35,090✔
1072
            dom.state.values.assign(dst.svalue, imm);
35,090✔
1073
            dom.state.values.assign(dst.uvalue, imm);
35,090✔
1074
            dom.state.values->overflow_bounds(dst.uvalue, bin.is64 ? 64 : 32, false);
38,052✔
1075
            dom.state.assign_type(bin.dst, T_NUM);
35,090✔
1076
            dom.state.havoc_offsets(bin.dst);
35,090✔
1077
            break;
17,545✔
1078
        case Bin::Op::MOVSX8:
×
1079
        case Bin::Op::MOVSX16:
1080
        case Bin::Op::MOVSX32: CRAB_ERROR("Unsupported operation");
×
1081
        case Bin::Op::ADD:
11,654✔
1082
            if (imm == 0) {
11,654✔
1083
                return;
21✔
1084
            }
1085
            add(bin.dst, gsl::narrow<int>(imm), finite_width);
11,650✔
1086
            break;
5,825✔
1087
        case Bin::Op::SUB:
32✔
1088
            if (imm == 0) {
32✔
1089
                return;
1✔
1090
            }
1091
            add(bin.dst, gsl::narrow<int>(-imm), finite_width);
30✔
1092
            break;
15✔
1093
        case Bin::Op::MUL:
106✔
1094
            dom.state.values->mul(dst.svalue, dst.uvalue, imm, finite_width);
106✔
1095
            dom.state.havoc_offsets(bin.dst);
106✔
1096
            break;
53✔
1097
        case Bin::Op::UDIV:
172✔
1098
            dom.state.values->udiv(dst.svalue, dst.uvalue, imm, finite_width);
172✔
1099
            dom.state.havoc_offsets(bin.dst);
172✔
1100
            break;
86✔
1101
        case Bin::Op::UMOD:
26✔
1102
            dom.state.values->urem(dst.svalue, dst.uvalue, imm, finite_width);
26✔
1103
            dom.state.havoc_offsets(bin.dst);
26✔
1104
            break;
13✔
1105
        case Bin::Op::SDIV:
12✔
1106
            dom.state.values->sdiv(dst.svalue, dst.uvalue, imm, finite_width);
12✔
1107
            dom.state.havoc_offsets(bin.dst);
12✔
1108
            break;
6✔
1109
        case Bin::Op::SMOD:
28✔
1110
            dom.state.values->srem(dst.svalue, dst.uvalue, imm, finite_width);
28✔
1111
            dom.state.havoc_offsets(bin.dst);
28✔
1112
            break;
14✔
1113
        case Bin::Op::OR:
560✔
1114
            dom.state.values->bitwise_or(dst.svalue, dst.uvalue, imm);
560✔
1115
            dom.state.havoc_offsets(bin.dst);
560✔
1116
            break;
280✔
1117
        case Bin::Op::AND:
4,204✔
1118
            // FIX: what to do with ptr&-8 as in counter/simple_loop_unrolled?
1119
            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, imm);
4,204✔
1120
            if (gsl::narrow<int32_t>(imm) > 0) {
4,204✔
1121
                // AND with immediate is only a 32-bit operation so svalue and uvalue are the same.
1122
                dom.state.values.add_constraint(dst.svalue <= imm);
5,451✔
1123
                dom.state.values.add_constraint(dst.uvalue <= imm);
5,451✔
1124
                dom.state.values.add_constraint(0 <= dst.svalue);
5,451✔
1125
                dom.state.values.add_constraint(0 <= dst.uvalue);
5,451✔
1126
            }
1127
            dom.state.havoc_offsets(bin.dst);
4,204✔
1128
            break;
2,102✔
1129
        case Bin::Op::LSH: shl(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
5,558✔
1130
        case Bin::Op::RSH: lshr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
2,582✔
1131
        case Bin::Op::ARSH: ashr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
1,104✔
1132
        case Bin::Op::XOR:
184✔
1133
            dom.state.values->bitwise_xor(dst.svalue, dst.uvalue, imm);
184✔
1134
            dom.state.havoc_offsets(bin.dst);
184✔
1135
            break;
92✔
1136
        }
1137
    } else {
1138
        // dst op= src
1139
        auto src_reg = std::get<Reg>(bin.v);
40,240✔
1140
        auto src = reg_pack(src_reg);
40,240✔
1141
        if (!dom.state.is_initialized(src_reg)) {
40,240✔
1142
            dom.state.havoc_register(bin.dst);
8✔
1143
            return;
34✔
1144
        }
1145
        switch (bin.op) {
40,232✔
1146
        case Bin::Op::ADD: {
2,084✔
1147
            if (dom.state.same_type(bin.dst, std::get<Reg>(bin.v))) {
2,084✔
1148
                // both must have been checked to be numbers
1149
                dom.state.values->add_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
1,482✔
1150
            } else {
1151
                // Here we're not sure that lhs and rhs are the same type; they might be.
1152
                // But previous assertions should fail unless we know that exactly one of lhs or rhs is a pointer.
1153
                dom.state =
903✔
1154
                    dom.state.join_over_types(bin.dst, [&](TypeToNumDomain& state, const TypeEncoding dst_type) {
1,204✔
1155
                        state =
302✔
1156
                            state.join_over_types(src_reg, [&](TypeToNumDomain& state, const TypeEncoding src_type) {
1,208✔
1157
                                if (dst_type == T_NUM && src_type != T_NUM) {
606✔
1158
                                    // num += ptr
1159
                                    state.assign_type(bin.dst, src_type);
8✔
1160
                                    if (const auto dst_offset = get_type_offset_variable(bin.dst, src_type)) {
8✔
1161
                                        state.values->apply(ArithBinOp::ADD, dst_offset.value(), dst.svalue,
12✔
1162
                                                            get_type_offset_variable(src_reg, src_type).value());
16✔
1163
                                    }
1164
                                    if (src_type == T_SHARED) {
8✔
UNCOV
1165
                                        state.values.assign(dst.shared_region_size, src.shared_region_size);
×
1166
                                    }
1167
                                } else if (dst_type != T_NUM && src_type == T_NUM) {
602✔
1168
                                    // ptr += num
1169
                                    state.assign_type(bin.dst, dst_type);
598✔
1170
                                    if (const auto dst_offset = get_type_offset_variable(bin.dst, dst_type)) {
598✔
1171
                                        state.values->apply(ArithBinOp::ADD, dst_offset.value(), dst_offset.value(),
897✔
1172
                                                            src.svalue);
598✔
1173
                                        if (dst_type == T_STACK) {
598✔
1174
                                            // Reduce the numeric size.
1175
                                            using namespace dsl_syntax;
9✔
1176
                                            if (state.values.intersect(src.svalue < 0)) {
27✔
1177
                                                state.values.havoc(dst.stack_numeric_size);
2✔
1178
                                                recompute_stack_numeric_size(state, stack, bin.dst);
2✔
1179
                                            } else {
1180
                                                state.values->apply_signed(ArithBinOp::SUB, dst.stack_numeric_size,
32✔
1181
                                                                           dst.stack_numeric_size,
8✔
1182
                                                                           dst.stack_numeric_size, src.svalue, 0);
16✔
1183
                                            }
1184
                                        }
1185
                                    }
1186
                                } else if (dst_type == T_NUM && src_type == T_NUM) {
598✔
1187
                                    // dst and src don't necessarily have the same type, but among the possibilities
1188
                                    // enumerated is the case where they are both numbers.
UNCOV
1189
                                    state.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue,
×
UNCOV
1190
                                                               src.svalue, finite_width);
×
1191
                                } else {
1192
                                    // We ignore the cases here that do not match the assumption described
1193
                                    // above.  Joining bottom with another result will leave the other
1194
                                    // results unchanged.
UNCOV
1195
                                    state.values.set_to_bottom();
×
1196
                                }
1197
                            });
1,210✔
1198
                    });
1,206✔
1199
                // careful: change dst.value only after dealing with offset
1200
                dom.state.values->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
602✔
1201
                                               finite_width);
1202
            }
1203
            break;
1,042✔
1204
        }
1205
        case Bin::Op::SUB: {
724✔
1206
            if (dom.state.same_type(bin.dst, std::get<Reg>(bin.v))) {
724✔
1207
                // src and dest have the same type.
1208
                TypeDomain tmp_m_type_inv = dom.state.types;
720✔
1209
                dom.state = dom.state.join_over_types(bin.dst, [&](TypeToNumDomain& state, const TypeEncoding type) {
1,440✔
1210
                    switch (type) {
726✔
1211
                    case T_NUM:
530✔
1212
                        // This is: sub_overflow(inv, dst.value, src.value, finite_width);
1213
                        state.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
530✔
1214
                                                   finite_width);
1215
                        state.assign_type(bin.dst, T_NUM);
530✔
1216
                        state.havoc_offsets(bin.dst);
530✔
1217
                        break;
530✔
1218
                    default:
196✔
1219
                        // ptr -= ptr
1220
                        // Assertions should make sure we only perform this on non-shared pointers.
1221
                        if (const auto dst_offset = get_type_offset_variable(bin.dst, type)) {
196✔
1222
                            state.values->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst_offset.value(),
392✔
1223
                                                       get_type_offset_variable(src_reg, type).value(), finite_width);
294✔
1224
                            state.values.havoc(dst_offset.value());
196✔
1225
                        }
1226
                        state.havoc_offsets(bin.dst);
196✔
1227
                        state.assign_type(bin.dst, T_NUM);
196✔
1228
                        break;
98✔
1229
                    }
1230
                });
1,083✔
1231
            } else {
720✔
1232
                // We're not sure that lhs and rhs are the same type.
1233
                // Either they're different, or at least one is not a singleton.
1234
                if (dom.state.is_in_group(std::get<Reg>(bin.v), TS_NUM)) {
4✔
1235
                    dom.state.values->sub_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
4✔
1236
                    if (auto dst_offset = dom.state.get_type_offset_variable(bin.dst)) {
4✔
1237
                        dom.state.values->sub(dst_offset.value(), src.svalue);
4✔
1238
                        if (dom.state.may_have_type(bin.dst, T_STACK)) {
4✔
1239
                            // Reduce the numeric size.
1240
                            using namespace dsl_syntax;
2✔
1241
                            if (dom.state.values.intersect(src.svalue > 0)) {
8✔
1242
                                dom.state.values.havoc(dst.stack_numeric_size);
4✔
1243
                                recompute_stack_numeric_size(dom.state, stack, bin.dst);
4✔
1244
                            } else {
UNCOV
1245
                                dom.state.values->apply(ArithBinOp::ADD, dst.stack_numeric_size, dst.stack_numeric_size,
×
1246
                                                        src.svalue);
1247
                            }
1248
                        }
1249
                    }
1250
                } else {
UNCOV
1251
                    dom.state.havoc_register(bin.dst);
×
1252
                }
1253
            }
1254
            break;
362✔
1255
        }
1256
        case Bin::Op::MUL:
76✔
1257
            dom.state.values->mul(dst.svalue, dst.uvalue, src.svalue, finite_width);
76✔
1258
            dom.state.havoc_offsets(bin.dst);
76✔
1259
            break;
38✔
1260
        case Bin::Op::UDIV:
96✔
1261
            dom.state.values->udiv(dst.svalue, dst.uvalue, src.uvalue, finite_width);
96✔
1262
            dom.state.havoc_offsets(bin.dst);
96✔
1263
            break;
48✔
1264
        case Bin::Op::UMOD:
38✔
1265
            dom.state.values->urem(dst.svalue, dst.uvalue, src.uvalue, finite_width);
38✔
1266
            dom.state.havoc_offsets(bin.dst);
38✔
1267
            break;
19✔
1268
        case Bin::Op::SDIV:
28✔
1269
            dom.state.values->sdiv(dst.svalue, dst.uvalue, src.svalue, finite_width);
28✔
1270
            dom.state.havoc_offsets(bin.dst);
28✔
1271
            break;
14✔
1272
        case Bin::Op::SMOD:
42✔
1273
            dom.state.values->srem(dst.svalue, dst.uvalue, src.svalue, finite_width);
42✔
1274
            dom.state.havoc_offsets(bin.dst);
42✔
1275
            break;
21✔
1276
        case Bin::Op::OR:
5,760✔
1277
            dom.state.values->bitwise_or(dst.svalue, dst.uvalue, src.uvalue, finite_width);
5,760✔
1278
            dom.state.havoc_offsets(bin.dst);
5,760✔
1279
            break;
2,880✔
1280
        case Bin::Op::AND:
504✔
1281
            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, src.uvalue, finite_width);
504✔
1282
            dom.state.havoc_offsets(bin.dst);
504✔
1283
            break;
252✔
1284
        case Bin::Op::LSH:
536✔
1285
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
536✔
1286
                auto src_interval = dom.state.values.eval_interval(src.uvalue);
536✔
1287
                if (std::optional<Number> sn = src_interval.singleton()) {
536✔
1288
                    // truncate to uint64?
1289
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
54✔
1290
                    if (imm <= std::numeric_limits<int32_t>::max()) {
54✔
1291
                        if (!bin.is64) {
54✔
1292
                            // Use only the low 32 bits of the value.
1293
                            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
30✔
1294
                        }
1295
                        shl(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
54✔
1296
                        break;
54✔
1297
                    }
1298
                }
1299
            }
1300
            dom.state.values->shl_overflow(dst.svalue, dst.uvalue, src.uvalue);
482✔
1301
            dom.state.havoc_offsets(bin.dst);
482✔
1302
            break;
241✔
1303
        case Bin::Op::RSH:
82✔
1304
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
82✔
1305
                auto src_interval = dom.state.values.eval_interval(src.uvalue);
82✔
1306
                if (std::optional<Number> sn = src_interval.singleton()) {
82✔
1307
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
38✔
1308
                    if (imm <= std::numeric_limits<int32_t>::max()) {
38✔
1309
                        if (!bin.is64) {
38✔
1310
                            // Use only the low 32 bits of the value.
1311
                            dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
18✔
1312
                        }
1313
                        lshr(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
38✔
1314
                        break;
38✔
1315
                    }
1316
                }
1317
            }
1318
            dom.state.havoc_register_except_type(bin.dst);
44✔
1319
            break;
22✔
1320
        case Bin::Op::ARSH:
36✔
1321
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
36✔
1322
                ashr(bin.dst, src.svalue, finite_width);
36✔
1323
                break;
36✔
1324
            }
UNCOV
1325
            dom.state.havoc_register_except_type(bin.dst);
×
1326
            break;
1327
        case Bin::Op::XOR:
502✔
1328
            dom.state.values->bitwise_xor(dst.svalue, dst.uvalue, src.uvalue, finite_width);
502✔
1329
            dom.state.havoc_offsets(bin.dst);
502✔
1330
            break;
251✔
1331
        case Bin::Op::MOVSX8:
2,224✔
1332
        case Bin::Op::MOVSX16:
1,112✔
1333
        case Bin::Op::MOVSX32: {
1,112✔
1334
            const int source_width = _movsx_bits(bin.op);
2,224✔
1335
            // Keep relational information if operation is a no-op.
1336
            if (dst.svalue == src.svalue &&
3,311✔
1337
                dom.state.values.eval_interval(dst.svalue) <= Interval::signed_int(source_width)) {
4,398✔
1338
                return;
11✔
1339
            }
1340
            if (dom.state.is_in_group(src_reg, TS_NUM)) {
2,202✔
1341
                dom.state.havoc_offsets(bin.dst);
2,202✔
1342
                dom.state.assign_type(bin.dst, T_NUM);
2,202✔
1343
                dom.state.values->sign_extend(dst.svalue, dst.uvalue, src.svalue, finite_width, source_width);
2,202✔
1344
                break;
2,202✔
1345
            }
UNCOV
1346
            dom.state.havoc_register(bin.dst);
×
1347
            break;
1348
        }
1349
        case Bin::Op::MOV:
27,500✔
1350
            // Keep relational information if operation is a no-op.
1351
            if (bin.is64 || dom.state.is_in_group(src_reg, TS_NUM)) {
27,500✔
1352
                if (bin.dst != src_reg) {
46,924✔
1353
                    // the 32bit case is handled below
1354
                    dom.state.assign(bin.dst, src_reg);
26,138✔
1355
                }
1356
            } else {
1357
                // If src is not a number, we don't know how to truncate a pointer.
UNCOV
1358
                dom.state.havoc_register(bin.dst);
×
1359
            }
1360
            break;
13,750✔
1361
        }
1362
    }
1363
    if (!bin.is64) {
101,148✔
1364
        dom.state.values->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
17,034✔
1365
    }
1366
}
1367

1368
void EbpfTransformer::initialize_loop_counter(const Label& label) {
40✔
1369
    dom.state.values.assign(variable_registry->loop_counter(to_string(label)), 0);
60✔
1370
}
40✔
1371

1372
// Abstract transformer for IncrementLoopCounter: bump the counter variable
// associated with the instruction's label by one. On the bottom (unreachable)
// state there is nothing to update, so the instruction is a no-op.
void EbpfTransformer::operator()(const IncrementLoopCounter& ins) {
    if (!dom.is_bottom()) {
        dom.state.values->add(variable_registry->loop_counter(to_string(ins.name)), 1);
    }
}
1379

1380
// Free-function entry point used by callers that hold only an EbpfDomain:
// constructs a transformer over `dom` and zero-initializes the loop counter
// for `label`.
void ebpf_domain_initialize_loop_counter(EbpfDomain& dom, const Label& label) {
    EbpfTransformer transformer{dom};
    transformer.initialize_loop_counter(label);
}
1383
} // namespace prevail
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc