• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

Alan-Jowett / ebpf-verifier / 15194704016

22 May 2025 08:53AM UTC coverage: 88.11% (-0.07%) from 88.177%
15194704016

push

github

elazarg
uniform class names and explicit constructors for adapt_sgraph.hpp

Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

27 of 30 new or added lines in 1 file covered. (90.0%)

481 existing lines in 33 files now uncovered.

8552 of 9706 relevant lines covered (88.11%)

9,089,054.61 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

96.25
/src/crab/ebpf_transformer.cpp
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3

4
// This file is eBPF-specific, not derived from CRAB.
5

6
#include <bitset>
7
#include <optional>
8
#include <utility>
9
#include <vector>
10

11
#include "boost/endian/conversion.hpp"
12

13
#include "asm_unmarshal.hpp"
14
#include "config.hpp"
15
#include "crab/array_domain.hpp"
16
#include "crab/dsl_syntax.hpp"
17
#include "crab/ebpf_domain.hpp"
18
#include "crab_utils/num_safety.hpp"
19
#include "platform.hpp"
20
#include "string_constraints.hpp"
21

22
namespace prevail {
23

24
class EbpfTransformer final {
25
    EbpfDomain& dom;
26
    // shorthands:
27
    NumAbsDomain& m_inv;
28
    ArrayDomain& stack;
29
    TypeDomain& type_inv;
30

31
  public:
32
    explicit EbpfTransformer(EbpfDomain& dom) : dom(dom), m_inv(dom.m_inv), stack(dom.stack), type_inv(dom.type_inv) {}
339,168✔
33

34
    // abstract transformers
35
    void operator()(const Assume&);
36
    void operator()(const Atomic&);
37
    void operator()(const Bin&);
38
    void operator()(const Call&);
39
    void operator()(const CallLocal&);
40
    void operator()(const Callx&);
41
    void operator()(const Exit&);
42
    void operator()(const IncrementLoopCounter&);
43
    void operator()(const Jmp&) const;
44
    void operator()(const LoadMapFd&);
45
    void operator()(const LoadMapAddress&);
46
    void operator()(const Mem&);
47
    void operator()(const Packet&);
48
    void operator()(const Un&);
49
    void operator()(const Undefined&);
50

51
    void initialize_loop_counter(const Label& label);
52

53
  private:
54
    /// Forget everything about all offset variables for a given register.
55
    void havoc_offsets(const Reg& reg);
56

57
    void scratch_caller_saved_registers();
58
    void save_callee_saved_registers(const std::string& prefix);
59
    void restore_callee_saved_registers(const std::string& prefix);
60
    void havoc_subprogram_stack(const std::string& prefix);
61
    void forget_packet_pointers();
62
    void do_load_mapfd(const Reg& dst_reg, int mapfd, bool maybe_null);
63
    void do_load_map_address(const Reg& dst_reg, const int mapfd, int32_t offset);
64

65
    void assign_valid_ptr(const Reg& dst_reg, bool maybe_null);
66

67
    void recompute_stack_numeric_size(NumAbsDomain& inv, const Reg& reg) const;
68
    void recompute_stack_numeric_size(NumAbsDomain& inv, Variable type_variable) const;
69
    void do_load_stack(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr, int width,
70
                       const Reg& src_reg);
71
    void do_load_ctx(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr_vague, int width);
72
    void do_load_packet_or_shared(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr, int width);
73
    void do_load(const Mem& b, const Reg& target_reg);
74

75
    void do_store_stack(NumAbsDomain& inv, const LinearExpression& addr, int width, const LinearExpression& val_type,
76
                        const LinearExpression& val_svalue, const LinearExpression& val_uvalue,
77
                        const std::optional<RegPack>& opt_val_reg);
78

79
    void do_mem_store(const Mem& b, const LinearExpression& val_type, const LinearExpression& val_svalue,
80
                      const LinearExpression& val_uvalue, const std::optional<RegPack>& opt_val_reg);
81

82
    void add(const Reg& dst_reg, int imm, int finite_width);
83
    void shl(const Reg& dst_reg, int imm, int finite_width);
84
    void lshr(const Reg& dst_reg, int imm, int finite_width);
85
    void ashr(const Reg& dst_reg, const LinearExpression& right_svalue, int finite_width);
86
    void sign_extend(const Reg& dst_reg, const LinearExpression& right_svalue, int target_width, int source_width);
87
}; // end EbpfTransformer
88

89
void ebpf_domain_transform(EbpfDomain& inv, const Instruction& ins) {
341,660✔
90
    if (inv.is_bottom()) {
341,660✔
91
        return;
2,524✔
92
    }
93
    const auto pre = inv;
339,136✔
94
    std::visit(EbpfTransformer{inv}, ins);
339,136✔
95
    if (inv.is_bottom() && !std::holds_alternative<Assume>(ins)) {
339,134✔
96
        // Failure: raise an exception to stop the analysis.
UNCOV
97
        std::stringstream msg;
×
UNCOV
98
        msg << "Bug! pre-invariant:\n"
×
99
            << pre << "\n followed by instruction: " << ins << "\n"
×
100
            << "leads to bottom";
×
101
        throw std::logic_error(msg.str());
×
102
    }
×
103
}
339,135✔
104

105
static void havoc_offsets(NumAbsDomain& inv, const Reg& reg) {
272,732✔
106
    const RegPack r = reg_pack(reg);
272,732✔
107
    inv.havoc(r.ctx_offset);
272,732✔
108
    inv.havoc(r.map_fd);
272,732✔
109
    inv.havoc(r.packet_offset);
272,732✔
110
    inv.havoc(r.shared_offset);
272,732✔
111
    inv.havoc(r.shared_region_size);
272,732✔
112
    inv.havoc(r.stack_offset);
272,732✔
113
    inv.havoc(r.stack_numeric_size);
272,732✔
114
}
272,732✔
115

116
static void havoc_register(NumAbsDomain& inv, const Reg& reg) {
130,320✔
117
    const RegPack r = reg_pack(reg);
130,320✔
118
    havoc_offsets(inv, reg);
130,320✔
119
    inv.havoc(r.svalue);
130,320✔
120
    inv.havoc(r.uvalue);
130,320✔
121
}
130,320✔
122

123
void EbpfTransformer::scratch_caller_saved_registers() {
20,026✔
124
    for (int i = R1_ARG; i <= R5_ARG; i++) {
120,156✔
125
        Reg r{gsl::narrow<uint8_t>(i)};
100,130✔
126
        havoc_register(m_inv, r);
100,130✔
127
        type_inv.havoc_type(m_inv, r);
100,130✔
128
    }
129
}
20,026✔
130

131
void EbpfTransformer::save_callee_saved_registers(const std::string& prefix) {
58✔
132
    // Create variables specific to the new call stack frame that store
133
    // copies of the states of r6 through r9.
134
    for (int r = R6; r <= R9; r++) {
290✔
135
        for (const DataKind kind : iterate_kinds()) {
2,552✔
136
            const Variable src_var = Variable::reg(kind, r);
2,320✔
137
            if (!m_inv.eval_interval(src_var).is_top()) {
4,640✔
138
                m_inv.assign(Variable::stack_frame_var(kind, r, prefix), src_var);
150✔
139
            }
140
        }
232✔
141
    }
142
}
58✔
143

144
void EbpfTransformer::restore_callee_saved_registers(const std::string& prefix) {
58✔
145
    for (int r = R6; r <= R9; r++) {
290✔
146
        for (const DataKind kind : iterate_kinds()) {
2,552✔
147
            const Variable src_var = Variable::stack_frame_var(kind, r, prefix);
2,320✔
148
            if (!m_inv.eval_interval(src_var).is_top()) {
4,640✔
149
                m_inv.assign(Variable::reg(kind, r), src_var);
150✔
150
            } else {
151
                m_inv.havoc(Variable::reg(kind, r));
2,220✔
152
            }
153
            m_inv.havoc(src_var);
2,320✔
154
        }
232✔
155
    }
156
}
58✔
157

158
void EbpfTransformer::havoc_subprogram_stack(const std::string& prefix) {
58✔
159
    const Variable r10_stack_offset = reg_pack(R10_STACK_POINTER).stack_offset;
58✔
160
    const auto intv = m_inv.eval_interval(r10_stack_offset);
58✔
161
    if (!intv.is_singleton()) {
58✔
162
        return;
36✔
163
    }
164
    const int64_t stack_start = intv.singleton()->cast_to<int64_t>() - EBPF_SUBPROGRAM_STACK_SIZE;
22✔
165
    for (const DataKind kind : iterate_kinds()) {
242✔
166
        stack.havoc(m_inv, kind, stack_start, EBPF_SUBPROGRAM_STACK_SIZE);
220✔
167
    }
22✔
168
}
58✔
169

170
void EbpfTransformer::forget_packet_pointers() {
42✔
171
    using namespace dsl_syntax;
21✔
172

173
    for (const Variable type_variable : Variable::get_type_variables()) {
1,522✔
174
        if (type_inv.has_type(m_inv, type_variable, T_PACKET)) {
1,480✔
175
            m_inv.havoc(Variable::kind_var(DataKind::types, type_variable));
956✔
176
            m_inv.havoc(Variable::kind_var(DataKind::packet_offsets, type_variable));
956✔
177
            m_inv.havoc(Variable::kind_var(DataKind::svalues, type_variable));
956✔
178
            m_inv.havoc(Variable::kind_var(DataKind::uvalues, type_variable));
956✔
179
        }
180
    }
42✔
181

182
    dom.initialize_packet();
42✔
183
}
42✔
184

185
void EbpfTransformer::havoc_offsets(const Reg& reg) { prevail::havoc_offsets(m_inv, reg); }
70,818✔
186

187
static LinearConstraint type_is_pointer(const RegPack& r) {
414✔
188
    using namespace dsl_syntax;
207✔
189
    return r.type >= T_CTX;
621✔
190
}
191

192
static LinearConstraint type_is_number(const RegPack& r) {
19,692✔
193
    using namespace dsl_syntax;
9,846✔
194
    return r.type == T_NUM;
19,692✔
195
}
196

197
static LinearConstraint type_is_number(const Reg& r) { return type_is_number(reg_pack(r)); }
8,538✔
198

199
static LinearConstraint type_is_not_stack(const RegPack& r) {
398✔
200
    using namespace dsl_syntax;
199✔
201
    return r.type != T_STACK;
398✔
202
}
203

204
/** Linear constraint for a pointer comparison.
205
 */
206
static LinearConstraint assume_cst_offsets_reg(const Condition::Op op, const Variable dst_offset,
1,912✔
207
                                               const Variable src_offset) {
208
    using namespace dsl_syntax;
956✔
209
    using Op = Condition::Op;
956✔
210
    switch (op) {
1,912✔
211
    case Op::EQ: return eq(dst_offset, src_offset);
6✔
212
    case Op::NE: return neq(dst_offset, src_offset);
12✔
213
    case Op::GE: return dst_offset >= src_offset;
6✔
UNCOV
214
    case Op::SGE: return dst_offset >= src_offset; // pointer comparison is unsigned
×
215
    case Op::LE: return dst_offset <= src_offset;
942✔
UNCOV
216
    case Op::SLE: return dst_offset <= src_offset; // pointer comparison is unsigned
×
217
    case Op::GT: return dst_offset > src_offset;
1,413✔
UNCOV
218
    case Op::SGT: return dst_offset > src_offset; // pointer comparison is unsigned
×
219
    case Op::SLT:
×
220
        return src_offset > dst_offset;
×
221
        // Note: reverse the test as a workaround for a strange lookup:
222
    case Op::LT: return src_offset > dst_offset; // FIX unsigned
9✔
UNCOV
223
    default: return dst_offset - dst_offset == 0;
×
224
    }
225
}
226

227
void EbpfTransformer::operator()(const Assume& s) {
50,490✔
228
    if (m_inv.is_bottom()) {
50,490✔
UNCOV
229
        return;
×
230
    }
231
    const Condition cond = s.cond;
50,490✔
232
    const auto dst = reg_pack(cond.left);
50,490✔
233
    if (const auto psrc_reg = std::get_if<Reg>(&cond.right)) {
50,490✔
234
        const auto src_reg = *psrc_reg;
10,058✔
235
        const auto src = reg_pack(src_reg);
10,058✔
236
        if (type_inv.same_type(m_inv, cond.left, std::get<Reg>(cond.right))) {
10,058✔
237
            m_inv = type_inv.join_over_types(m_inv, cond.left, [&](NumAbsDomain& inv, const TypeEncoding type) {
18,548✔
238
                if (type == T_NUM) {
9,278✔
239
                    for (const LinearConstraint& cst :
16,457✔
240
                         inv->assume_cst_reg(cond.op, cond.is64, dst.svalue, dst.uvalue, src.svalue, src.uvalue)) {
21,865✔
241
                        inv.add_constraint(cst);
10,816✔
242
                    }
7,366✔
243
                } else {
244
                    // Either pointers to a singleton region,
245
                    // or an equality comparison on map descriptors/pointers to non-singleton locations
246
                    if (const auto dst_offset = dom.get_type_offset_variable(cond.left, type)) {
1,912✔
247
                        if (const auto src_offset = dom.get_type_offset_variable(src_reg, type)) {
1,912✔
248
                            inv.add_constraint(assume_cst_offsets_reg(cond.op, dst_offset.value(), src_offset.value()));
2,868✔
249
                        }
250
                    }
251
                }
252
            });
18,552✔
253
        } else {
254
            // We should only reach here if the `assume_assertions` option is off
255
            assert(!thread_local_options.assume_assertions || dom.is_bottom());
784✔
256
            // be sound in any case, it happens to flush out bugs:
257
            m_inv.set_to_top();
784✔
258
        }
259
    } else {
260
        const int64_t imm = gsl::narrow_cast<int64_t>(std::get<Imm>(cond.right).v);
40,432✔
261
        for (const LinearConstraint& cst : m_inv->assume_cst_imm(cond.op, cond.is64, dst.svalue, dst.uvalue, imm)) {
124,878✔
262
            m_inv.add_constraint(cst);
84,446✔
263
        }
40,432✔
264
    }
265
}
266

267
void EbpfTransformer::operator()(const Undefined& a) {}
2,402✔
268

269
// Simple truncation function usable with swap_endianness().
270
template <class T>
271
constexpr T truncate(T x) noexcept {
24✔
272
    return x;
24✔
273
}
274

275
void EbpfTransformer::operator()(const Un& stmt) {
2,956✔
276
    if (m_inv.is_bottom()) {
2,956✔
UNCOV
277
        return;
×
278
    }
279
    const auto dst = reg_pack(stmt.dst);
2,956✔
280
    auto swap_endianness = [&](const Variable v, auto be_or_le) {
7,082✔
281
        if (m_inv.entail(type_is_number(stmt.dst))) {
8,406✔
282
            if (const auto n = m_inv.eval_interval(v).singleton()) {
11,194✔
283
                if (n->fits_cast_to<int64_t>()) {
244✔
284
                    m_inv.set(v, Interval{be_or_le(n->cast_to<int64_t>())});
244✔
285
                    return;
244✔
286
                }
287
            }
288
        }
289
        m_inv.havoc(v);
5,360✔
290
        havoc_offsets(stmt.dst);
5,360✔
291
    };
2,956✔
292
    // Swap bytes if needed.  For 64-bit types we need the weights to fit in a
293
    // signed int64, but for smaller types we don't want sign extension,
294
    // so we use unsigned which still fits in a signed int64.
295
    switch (stmt.op) {
2,956✔
296
    case Un::Op::BE16:
2,632✔
297
        if (!thread_local_options.big_endian) {
2,632✔
298
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
2,628✔
299
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
2,628✔
300
        } else {
301
            swap_endianness(dst.svalue, truncate<uint16_t>);
4✔
302
            swap_endianness(dst.uvalue, truncate<uint16_t>);
4✔
303
        }
304
        break;
1,316✔
305
    case Un::Op::BE32:
118✔
306
        if (!thread_local_options.big_endian) {
118✔
307
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
116✔
308
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
116✔
309
        } else {
310
            swap_endianness(dst.svalue, truncate<uint32_t>);
2✔
311
            swap_endianness(dst.uvalue, truncate<uint32_t>);
2✔
312
        }
313
        break;
59✔
314
    case Un::Op::BE64:
10✔
315
        if (!thread_local_options.big_endian) {
10✔
316
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
8✔
317
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
8✔
318
        }
319
        break;
5✔
320
    case Un::Op::LE16:
12✔
321
        if (thread_local_options.big_endian) {
12✔
322
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
2✔
323
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
2✔
324
        } else {
325
            swap_endianness(dst.svalue, truncate<uint16_t>);
10✔
326
            swap_endianness(dst.uvalue, truncate<uint16_t>);
10✔
327
        }
328
        break;
6✔
329
    case Un::Op::LE32:
8✔
330
        if (thread_local_options.big_endian) {
8✔
331
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
2✔
332
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
2✔
333
        } else {
334
            swap_endianness(dst.svalue, truncate<uint32_t>);
6✔
335
            swap_endianness(dst.uvalue, truncate<uint32_t>);
6✔
336
        }
337
        break;
4✔
338
    case Un::Op::LE64:
8✔
339
        if (thread_local_options.big_endian) {
8✔
340
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
2✔
341
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
2✔
342
        }
343
        break;
4✔
344
    case Un::Op::SWAP16:
10✔
345
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
10✔
346
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
10✔
347
        break;
5✔
348
    case Un::Op::SWAP32:
6✔
349
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
6✔
350
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
6✔
351
        break;
3✔
352
    case Un::Op::SWAP64:
6✔
353
        swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
6✔
354
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
6✔
355
        break;
3✔
356
    case Un::Op::NEG:
146✔
357
        m_inv->neg(dst.svalue, dst.uvalue, stmt.is64 ? 64 : 32);
166✔
358
        havoc_offsets(stmt.dst);
1,551✔
359
        break;
73✔
360
    }
361
}
362

363
void EbpfTransformer::operator()(const Exit& a) {
1,256✔
364
    if (m_inv.is_bottom()) {
1,256✔
365
        return;
1,198✔
366
    }
367
    // Clean up any state for the current stack frame.
368
    const std::string prefix = a.stack_frame_prefix;
1,256✔
369
    if (prefix.empty()) {
1,256✔
370
        return;
1,198✔
371
    }
372
    havoc_subprogram_stack(prefix);
58✔
373
    restore_callee_saved_registers(prefix);
58✔
374

375
    // Restore r10.
376
    constexpr Reg r10_reg{R10_STACK_POINTER};
58✔
377
    add(r10_reg, EBPF_SUBPROGRAM_STACK_SIZE, 64);
58✔
378
}
1,256✔
379

380
void EbpfTransformer::operator()(const Jmp&) const {
14,796✔
381
    // This is a NOP. It only exists to hold the jump preconditions.
382
}
14,796✔
383

384
void EbpfTransformer::operator()(const Packet& a) {
236✔
385
    if (m_inv.is_bottom()) {
236✔
UNCOV
386
        return;
×
387
    }
388
    const auto reg = reg_pack(R0_RETURN_VALUE);
236✔
389
    constexpr Reg r0_reg{R0_RETURN_VALUE};
236✔
390
    type_inv.assign_type(m_inv, r0_reg, T_NUM);
236✔
391
    havoc_offsets(r0_reg);
236✔
392
    m_inv.havoc(reg.svalue);
236✔
393
    m_inv.havoc(reg.uvalue);
236✔
394
    scratch_caller_saved_registers();
236✔
395
}
396

397
void EbpfTransformer::do_load_stack(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr,
12,518✔
398
                                    const int width, const Reg& src_reg) {
399
    type_inv.assign_type(inv, target_reg, stack.load(inv, DataKind::types, addr, width));
12,518✔
400
    using namespace dsl_syntax;
6,259✔
401
    if (inv.entail(width <= reg_pack(src_reg).stack_numeric_size)) {
25,036✔
402
        type_inv.assign_type(inv, target_reg, T_NUM);
723✔
403
    }
404

405
    const RegPack& target = reg_pack(target_reg);
12,518✔
406
    if (width == 1 || width == 2 || width == 4 || width == 8) {
12,518✔
407
        // Use the addr before we havoc the destination register since we might be getting the
408
        // addr from that same register.
409
        const std::optional<LinearExpression> sresult = stack.load(inv, DataKind::svalues, addr, width);
12,518✔
410
        const std::optional<LinearExpression> uresult = stack.load(inv, DataKind::uvalues, addr, width);
12,518✔
411
        havoc_register(inv, target_reg);
12,518✔
412
        inv.assign(target.svalue, sresult);
12,518✔
413
        inv.assign(target.uvalue, uresult);
12,518✔
414

415
        if (type_inv.has_type(inv, target.type, T_CTX)) {
12,518✔
416
            inv.assign(target.ctx_offset, stack.load(inv, DataKind::ctx_offsets, addr, width));
3,318✔
417
        }
418
        if (type_inv.has_type(inv, target.type, T_MAP) || type_inv.has_type(inv, target.type, T_MAP_PROGRAMS)) {
12,518✔
419
            inv.assign(target.map_fd, stack.load(inv, DataKind::map_fds, addr, width));
1,248✔
420
        }
421
        if (type_inv.has_type(inv, target.type, T_PACKET)) {
12,518✔
422
            inv.assign(target.packet_offset, stack.load(inv, DataKind::packet_offsets, addr, width));
1,089✔
423
        }
424
        if (type_inv.has_type(inv, target.type, T_SHARED)) {
12,518✔
425
            inv.assign(target.shared_offset, stack.load(inv, DataKind::shared_offsets, addr, width));
2,388✔
426
            inv.assign(target.shared_region_size, stack.load(inv, DataKind::shared_region_sizes, addr, width));
2,388✔
427
        }
428
        if (type_inv.has_type(inv, target.type, T_STACK)) {
12,518✔
429
            inv.assign(target.stack_offset, stack.load(inv, DataKind::stack_offsets, addr, width));
1,131✔
430
            inv.assign(target.stack_numeric_size, stack.load(inv, DataKind::stack_numeric_sizes, addr, width));
1,131✔
431
        }
432
    } else {
12,518✔
UNCOV
433
        havoc_register(inv, target_reg);
×
434
    }
435
}
12,518✔
436

437
void EbpfTransformer::do_load_ctx(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr_vague,
3,534✔
438
                                  const int width) {
439
    using namespace dsl_syntax;
1,767✔
440
    if (inv.is_bottom()) {
3,534✔
441
        return;
1,526✔
442
    }
443

444
    const ebpf_context_descriptor_t* desc = thread_local_program_info->type.context_descriptor;
3,534✔
445

446
    const RegPack& target = reg_pack(target_reg);
3,534✔
447

448
    if (desc->end < 0) {
3,534✔
449
        havoc_register(inv, target_reg);
290✔
450
        type_inv.assign_type(inv, target_reg, T_NUM);
290✔
451
        return;
290✔
452
    }
453

454
    const Interval interval = inv.eval_interval(addr_vague);
3,244✔
455
    const std::optional<Number> maybe_addr = interval.singleton();
3,244✔
456
    havoc_register(inv, target_reg);
3,244✔
457

458
    const bool may_touch_ptr =
1,622✔
459
        interval.contains(desc->data) || interval.contains(desc->meta) || interval.contains(desc->end);
5,477✔
460

461
    if (!maybe_addr) {
3,244✔
462
        if (may_touch_ptr) {
2✔
463
            type_inv.havoc_type(inv, target_reg);
2✔
464
        } else {
UNCOV
465
            type_inv.assign_type(inv, target_reg, T_NUM);
×
466
        }
467
        return;
2✔
468
    }
469

470
    const Number addr = *maybe_addr;
3,242✔
471

472
    // We use offsets for packet data, data_end, and meta during verification,
473
    // but at runtime they will be 64-bit pointers.  We can use the offset values
474
    // for verification like we use map_fd's as a proxy for maps which
475
    // at runtime are actually 64-bit memory pointers.
476
    const int offset_width = desc->end - desc->data;
3,242✔
477
    if (addr == desc->data) {
3,242✔
478
        if (width == offset_width) {
1,000✔
479
            inv.assign(target.packet_offset, 0);
1,500✔
480
        }
481
    } else if (addr == desc->end) {
2,242✔
482
        if (width == offset_width) {
990✔
483
            inv.assign(target.packet_offset, Variable::packet_size());
1,479✔
484
        }
485
    } else if (addr == desc->meta) {
1,252✔
486
        if (width == offset_width) {
18✔
487
            inv.assign(target.packet_offset, Variable::meta_offset());
27✔
488
        }
489
    } else {
490
        if (may_touch_ptr) {
1,234✔
UNCOV
491
            type_inv.havoc_type(inv, target_reg);
×
492
        } else {
493
            type_inv.assign_type(inv, target_reg, T_NUM);
1,851✔
494
        }
495
        return;
1,234✔
496
    }
497
    if (width == offset_width) {
2,008✔
498
        type_inv.assign_type(inv, target_reg, T_PACKET);
2,004✔
499
        inv.add_constraint(4098 <= target.svalue);
4,008✔
500
        inv.add_constraint(target.svalue <= PTR_MAX);
3,006✔
501
    }
502
}
6,718✔
503

504
void EbpfTransformer::do_load_packet_or_shared(NumAbsDomain& inv, const Reg& target_reg, const LinearExpression& addr,
10,482✔
505
                                               const int width) {
506
    if (inv.is_bottom()) {
10,482✔
UNCOV
507
        return;
×
508
    }
509
    const RegPack& target = reg_pack(target_reg);
10,482✔
510

511
    type_inv.assign_type(inv, target_reg, T_NUM);
10,482✔
512
    havoc_register(inv, target_reg);
10,482✔
513

514
    // A 1 or 2 byte copy results in a limited range of values that may be used as array indices.
515
    if (width == 1 || width == 2) {
10,482✔
516
        const Interval full = Interval::unsigned_int(width * 8);
6,456✔
517
        inv.set(target.svalue, full);
6,456✔
518
        inv.set(target.uvalue, full);
6,456✔
519
    }
6,456✔
520
}
521

522
void EbpfTransformer::do_load(const Mem& b, const Reg& target_reg) {
27,118✔
523
    using namespace dsl_syntax;
13,559✔
524

525
    const auto mem_reg = reg_pack(b.access.basereg);
27,118✔
526
    const int width = b.access.width;
27,118✔
527
    const int offset = b.access.offset;
27,118✔
528

529
    if (b.access.basereg.v == R10_STACK_POINTER) {
27,118✔
530
        const LinearExpression addr = mem_reg.stack_offset + offset;
17,955✔
531
        do_load_stack(m_inv, target_reg, addr, width, b.access.basereg);
11,970✔
532
        return;
11,970✔
533
    }
11,970✔
534

535
    m_inv = type_inv.join_over_types(m_inv, b.access.basereg, [&](NumAbsDomain& inv, TypeEncoding type) {
30,296✔
536
        switch (type) {
15,378✔
537
        case T_UNINIT: return;
407✔
538
        case T_MAP: return;
539
        case T_MAP_PROGRAMS: return;
540
        case T_NUM: return;
541
        case T_CTX: {
3,534✔
542
            LinearExpression addr = mem_reg.ctx_offset + offset;
5,301✔
543
            do_load_ctx(inv, target_reg, addr, width);
3,534✔
544
            break;
3,534✔
545
        }
3,534✔
546
        case T_STACK: {
548✔
547
            LinearExpression addr = mem_reg.stack_offset + offset;
822✔
548
            do_load_stack(inv, target_reg, addr, width, b.access.basereg);
548✔
549
            break;
548✔
550
        }
548✔
551
        case T_PACKET: {
3,270✔
552
            LinearExpression addr = mem_reg.packet_offset + offset;
4,905✔
553
            do_load_packet_or_shared(inv, target_reg, addr, width);
3,270✔
554
            break;
3,270✔
555
        }
3,270✔
556
        default: {
7,212✔
557
            LinearExpression addr = mem_reg.shared_offset + offset;
10,818✔
558
            do_load_packet_or_shared(inv, target_reg, addr, width);
7,212✔
559
            break;
7,212✔
560
        }
7,212✔
561
        }
562
    });
15,148✔
563
}
564

565
void EbpfTransformer::do_store_stack(NumAbsDomain& inv, const LinearExpression& addr, const int width,
31,670✔
566
                                     const LinearExpression& val_type, const LinearExpression& val_svalue,
567
                                     const LinearExpression& val_uvalue, const std::optional<RegPack>& opt_val_reg) {
568
    {
15,835✔
569
        const std::optional<Variable> var = stack.store_type(inv, addr, width, val_type);
31,670✔
570
        type_inv.assign_type(inv, var, val_type);
31,670✔
571
    }
572
    if (width == 8) {
31,670✔
573
        inv.assign(stack.store(inv, DataKind::svalues, addr, width, val_svalue), val_svalue);
12,834✔
574
        inv.assign(stack.store(inv, DataKind::uvalues, addr, width, val_uvalue), val_uvalue);
12,834✔
575

576
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_CTX)) {
12,834✔
577
            inv.assign(stack.store(inv, DataKind::ctx_offsets, addr, width, opt_val_reg->ctx_offset),
399✔
578
                       opt_val_reg->ctx_offset);
266✔
579
        } else {
580
            stack.havoc(inv, DataKind::ctx_offsets, addr, width);
12,568✔
581
        }
582

583
        if (opt_val_reg &&
25,658✔
584
            (type_inv.has_type(m_inv, val_type, T_MAP) || type_inv.has_type(m_inv, val_type, T_MAP_PROGRAMS))) {
25,611✔
585
            inv.assign(stack.store(inv, DataKind::map_fds, addr, width, opt_val_reg->map_fd), opt_val_reg->map_fd);
141✔
586
        } else {
587
            stack.havoc(inv, DataKind::map_fds, addr, width);
12,740✔
588
        }
589

590
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_PACKET)) {
12,834✔
591
            inv.assign(stack.store(inv, DataKind::packet_offsets, addr, width, opt_val_reg->packet_offset),
39✔
592
                       opt_val_reg->packet_offset);
26✔
593
        } else {
594
            stack.havoc(inv, DataKind::packet_offsets, addr, width);
12,808✔
595
        }
596

597
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_SHARED)) {
12,834✔
598
            inv.assign(stack.store(inv, DataKind::shared_offsets, addr, width, opt_val_reg->shared_offset),
225✔
599
                       opt_val_reg->shared_offset);
150✔
600
            inv.assign(stack.store(inv, DataKind::shared_region_sizes, addr, width, opt_val_reg->shared_region_size),
225✔
601
                       opt_val_reg->shared_region_size);
150✔
602
        } else {
603
            stack.havoc(inv, DataKind::shared_region_sizes, addr, width);
12,684✔
604
            stack.havoc(inv, DataKind::shared_offsets, addr, width);
12,684✔
605
        }
606

607
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_STACK)) {
12,834✔
608
            inv.assign(stack.store(inv, DataKind::stack_offsets, addr, width, opt_val_reg->stack_offset),
45✔
609
                       opt_val_reg->stack_offset);
30✔
610
            inv.assign(stack.store(inv, DataKind::stack_numeric_sizes, addr, width, opt_val_reg->stack_numeric_size),
45✔
611
                       opt_val_reg->stack_numeric_size);
30✔
612
        } else {
613
            stack.havoc(inv, DataKind::stack_offsets, addr, width);
12,804✔
614
            stack.havoc(inv, DataKind::stack_numeric_sizes, addr, width);
12,804✔
615
        }
616
    } else {
617
        if ((width == 1 || width == 2 || width == 4) && type_inv.get_type(m_inv, val_type) == T_NUM) {
18,836✔
618
            // Keep track of numbers on the stack that might be used as array indices.
619
            if (const auto stack_svalue = stack.store(inv, DataKind::svalues, addr, width, val_svalue)) {
18,638✔
620
                inv.assign(stack_svalue, val_svalue);
18,636✔
621
                inv->overflow_bounds(*stack_svalue, width * 8, true);
18,636✔
622
            }
623
            if (const auto stack_uvalue = stack.store(inv, DataKind::uvalues, addr, width, val_uvalue)) {
18,638✔
624
                inv.assign(stack_uvalue, val_uvalue);
18,636✔
625
                inv->overflow_bounds(*stack_uvalue, width * 8, false);
18,636✔
626
            }
627
        } else {
628
            stack.havoc(inv, DataKind::svalues, addr, width);
198✔
629
            stack.havoc(inv, DataKind::uvalues, addr, width);
198✔
630
        }
631
        stack.havoc(inv, DataKind::ctx_offsets, addr, width);
18,836✔
632
        stack.havoc(inv, DataKind::map_fds, addr, width);
18,836✔
633
        stack.havoc(inv, DataKind::packet_offsets, addr, width);
18,836✔
634
        stack.havoc(inv, DataKind::shared_offsets, addr, width);
18,836✔
635
        stack.havoc(inv, DataKind::stack_offsets, addr, width);
18,836✔
636
        stack.havoc(inv, DataKind::shared_region_sizes, addr, width);
18,836✔
637
        stack.havoc(inv, DataKind::stack_numeric_sizes, addr, width);
18,836✔
638
    }
639

640
    // Update stack_numeric_size for any stack type variables.
641
    // stack_numeric_size holds the number of contiguous bytes starting from stack_offset that are known to be numeric.
642
    auto updated_lb = m_inv.eval_interval(addr).lb();
47,505✔
643
    auto updated_ub = m_inv.eval_interval(addr).ub() + width;
79,175✔
644
    for (const Variable type_variable : Variable::get_type_variables()) {
1,832,896✔
645
        if (!type_inv.has_type(inv, type_variable, T_STACK)) {
1,801,226✔
646
            continue;
936,457✔
647
        }
648
        const Variable stack_offset_variable = Variable::kind_var(DataKind::stack_offsets, type_variable);
864,769✔
649
        const Variable stack_numeric_size_variable = Variable::kind_var(DataKind::stack_numeric_sizes, type_variable);
864,769✔
650

651
        using namespace dsl_syntax;
432,385✔
652
        // See if the variable's numeric interval overlaps with changed bytes.
653
        if (m_inv.intersect(dsl_syntax::operator<=(addr, stack_offset_variable + stack_numeric_size_variable)) &&
2,591,458✔
654
            m_inv.intersect(operator>=(addr + width, stack_offset_variable))) {
1,723,840✔
655
            m_inv.havoc(stack_numeric_size_variable);
827,035✔
656
            recompute_stack_numeric_size(m_inv, type_variable);
827,035✔
657
        }
658
    }
31,670✔
659
}
31,670✔
660

661
void EbpfTransformer::operator()(const Mem& b) {
    // Dispatch a memory instruction to the appropriate load/store transformer.
    if (m_inv.is_bottom()) {
        return;
    }
    const auto* preg = std::get_if<Reg>(&b.value);
    if (preg == nullptr) {
        // Storing an immediate: the stored value is always a number.
        const uint64_t imm = std::get<Imm>(b.value).v;
        do_mem_store(b, T_NUM, to_signed(imm), imm, {});
        return;
    }
    if (b.is_load) {
        do_load(b, *preg);
        return;
    }
    // Storing from a register: pass along its type/value variables.
    const auto data_reg = reg_pack(*preg);
    do_mem_store(b, data_reg.type, data_reg.svalue, data_reg.uvalue, data_reg);
}
677

678
// Transformer for a store through a pointer register (or of an immediate).
// val_type/val_svalue/val_uvalue describe the stored value; opt_val_reg is the
// full register pack when the stored value comes from a register, and empty
// for immediate stores. Only stack contents are tracked, so stores through
// non-stack pointers have no effect on the abstract state.
void EbpfTransformer::do_mem_store(const Mem& b, const LinearExpression& val_type, const LinearExpression& val_svalue,
                                   const LinearExpression& val_uvalue, const std::optional<RegPack>& opt_val_reg) {
    if (m_inv.is_bottom()) {
        return;
    }
    const int width = b.access.width;
    const Number offset{b.access.offset};
    if (b.access.basereg.v == R10_STACK_POINTER) {
        // A store through r10: the target cells are updated only when r10's
        // stack offset is a single known value; otherwise nothing is recorded.
        const auto r10_stack_offset = reg_pack(b.access.basereg).stack_offset;
        const auto r10_interval = m_inv.eval_interval(r10_stack_offset);
        if (r10_interval.is_singleton()) {
            const int32_t stack_offset = r10_interval.singleton()->cast_to<int32_t>();
            const Number base_addr{stack_offset};
            do_store_stack(m_inv, base_addr + offset, width, val_type, val_svalue, val_uvalue, opt_val_reg);
        }
        return;
    }
    // Any other base register may have several possible types; join the
    // post-states over all of them. Only the T_STACK case updates cells.
    m_inv = type_inv.join_over_types(m_inv, b.access.basereg, [&](NumAbsDomain& inv, const TypeEncoding type) {
        if (type == T_STACK) {
            const auto base_addr = LinearExpression(dom.get_type_offset_variable(b.access.basereg, type).value());
            do_store_stack(inv, dsl_syntax::operator+(base_addr, offset), width, val_type, val_svalue, val_uvalue,
                           opt_val_reg);
        }
        // do nothing for any other type
    });
}
704

705
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
706
static Bin atomic_to_bin(const Atomic& a) {
84✔
707
    Bin bin{.dst = Reg{R11_ATOMIC_SCRATCH}, .v = a.valreg, .is64 = a.access.width == sizeof(uint64_t), .lddw = false};
84✔
708
    switch (a.op) {
84✔
709
    case Atomic::Op::ADD: bin.op = Bin::Op::ADD; break;
16✔
710
    case Atomic::Op::OR: bin.op = Bin::Op::OR; break;
16✔
711
    case Atomic::Op::AND: bin.op = Bin::Op::AND; break;
16✔
712
    case Atomic::Op::XOR: bin.op = Bin::Op::XOR; break;
16✔
713
    case Atomic::Op::XCHG:
10✔
714
    case Atomic::Op::CMPXCHG: bin.op = Bin::Op::MOV; break;
10✔
UNCOV
715
    default: throw std::exception();
×
716
    }
717
    return bin;
84✔
718
}
719

720
// Transformer for atomic memory operations (BPF_ATOMIC). The update is modeled
// as a load into the R11 scratch pseudo-register, the corresponding ALU
// operation, and a store back to the same location.
void EbpfTransformer::operator()(const Atomic& a) {
    if (m_inv.is_bottom()) {
        return;
    }
    // Proceed only when the base is known to be a pointer and the operand a
    // number; otherwise leave the state unchanged (presumably the checker
    // reports the violation — confirm against the checker's assertions).
    if (!m_inv.entail(type_is_pointer(reg_pack(a.access.basereg))) ||
        !m_inv.entail(type_is_number(reg_pack(a.valreg)))) {
        return;
    }
    if (m_inv.entail(type_is_not_stack(reg_pack(a.access.basereg)))) {
        // Shared memory regions are volatile so we can just havoc
        // any register that will be updated.
        if (a.op == Atomic::Op::CMPXCHG) {
            havoc_register(m_inv, Reg{R0_RETURN_VALUE});
        } else if (a.fetch) {
            havoc_register(m_inv, a.valreg);
        }
        return;
    }

    // Fetch the current value into the R11 pseudo-register.
    constexpr Reg r11{R11_ATOMIC_SCRATCH};
    (*this)(Mem{.access = a.access, .value = r11, .is_load = true});

    // Compute the new value in R11.
    (*this)(atomic_to_bin(a));

    if (a.op == Atomic::Op::CMPXCHG) {
        // For CMPXCHG, store the original value in r0.
        (*this)(Mem{.access = a.access, .value = Reg{R0_RETURN_VALUE}, .is_load = true});

        // For the destination, there are 3 possibilities:
        // 1) dst.value == r0.value : set R11 to valreg
        // 2) dst.value != r0.value : don't modify R11
        // 3) dst.value may or may not == r0.value : set R11 to the union of R11 and valreg
        // For now we just havoc the value of R11.
        havoc_register(m_inv, r11);
    } else if (a.fetch) {
        // For other FETCH operations, store the original value in the src register.
        (*this)(Mem{.access = a.access, .value = a.valreg, .is_load = true});
    }

    // Store the new value back in the original shared memory location.
    // Note that do_mem_store() currently doesn't track shared memory values,
    // but stack memory values are tracked and are legal here.
    (*this)(Mem{.access = a.access, .value = r11, .is_load = false});

    // Clear the R11 pseudo-register.
    havoc_register(m_inv, r11);
    type_inv.havoc_type(m_inv, r11);
}
770

771
// Transformer for a call to an eBPF helper function: scans the arguments
// (remembering which register carries a map fd and havocking any writable
// memory the helper may mutate), models the return value in r0, and finally
// scratches the caller-saved registers.
void EbpfTransformer::operator()(const Call& call) {
    using namespace dsl_syntax;
    if (m_inv.is_bottom()) {
        return;
    }
    std::optional<Reg> maybe_fd_reg{};
    for (ArgSingle param : call.singles) {
        switch (param.kind) {
        // Remember the fd register: for map lookups it determines r0's type below.
        case ArgSingle::Kind::MAP_FD: maybe_fd_reg = param.reg; break;
        case ArgSingle::Kind::ANYTHING:
        case ArgSingle::Kind::MAP_FD_PROGRAMS:
        case ArgSingle::Kind::PTR_TO_MAP_KEY:
        case ArgSingle::Kind::PTR_TO_MAP_VALUE:
        case ArgSingle::Kind::PTR_TO_CTX:
            // Do nothing. We don't track the content of relevant memory regions
            break;
        }
    }
    for (ArgPair param : call.pairs) {
        switch (param.kind) {
        case ArgPair::Kind::PTR_TO_READABLE_MEM_OR_NULL:
        case ArgPair::Kind::PTR_TO_READABLE_MEM:
            // Do nothing. No side effect allowed.
            break;

        case ArgPair::Kind::PTR_TO_WRITABLE_MEM: {
            bool store_numbers = true;
            auto variable = dom.get_type_offset_variable(param.mem);
            if (!variable.has_value()) {
                // checked by the checker
                break;
            }
            // addr/width describe the region [addr, addr+width) the helper may write.
            Variable addr = variable.value();
            Variable width = reg_pack(param.size).svalue;

            m_inv = type_inv.join_over_types(m_inv, param.mem, [&](NumAbsDomain& inv, const TypeEncoding type) {
                if (type == T_STACK) {
                    // Pointer to a memory region that the called function may change,
                    // so we must havoc.
                    stack.havoc(inv, DataKind::types, addr, width);
                    stack.havoc(inv, DataKind::svalues, addr, width);
                    stack.havoc(inv, DataKind::uvalues, addr, width);
                    stack.havoc(inv, DataKind::ctx_offsets, addr, width);
                    stack.havoc(inv, DataKind::map_fds, addr, width);
                    stack.havoc(inv, DataKind::packet_offsets, addr, width);
                    stack.havoc(inv, DataKind::shared_offsets, addr, width);
                    stack.havoc(inv, DataKind::stack_offsets, addr, width);
                    stack.havoc(inv, DataKind::shared_region_sizes, addr, width);
                } else {
                    store_numbers = false;
                }
            });
            if (store_numbers) {
                // Functions are not allowed to write sensitive data,
                // and initialization is guaranteed
                stack.store_numbers(m_inv, addr, width);
            }
        }
        }
    }

    constexpr Reg r0_reg{R0_RETURN_VALUE};
    const auto r0_pack = reg_pack(r0_reg);
    m_inv.havoc(r0_pack.stack_numeric_size);
    if (call.is_map_lookup) {
        // This is the only way to get a null pointer
        if (maybe_fd_reg) {
            if (const auto map_type = dom.get_map_type(*maybe_fd_reg)) {
                if (thread_local_program_info->platform->get_map_type(*map_type).value_type == EbpfMapValueType::MAP) {
                    // Map-in-map: a successful lookup yields an fd for the inner map.
                    if (const auto inner_map_fd = dom.get_map_inner_map_fd(*maybe_fd_reg)) {
                        do_load_mapfd(r0_reg, to_signed(*inner_map_fd), true);
                        goto out;
                    }
                } else {
                    // Data map: r0 is a maybe-null shared pointer of known region size.
                    assign_valid_ptr(r0_reg, true);
                    m_inv.assign(r0_pack.shared_offset, 0);
                    m_inv.set(r0_pack.shared_region_size, dom.get_map_value_size(*maybe_fd_reg));
                    type_inv.assign_type(m_inv, r0_reg, T_SHARED);
                }
            }
        }
        // NOTE(review): reached both when no fd/map info is available and as
        // fall-through after the T_SHARED branch above (which additionally set
        // shared_region_size) — confirm the repeated assignment is intentional.
        assign_valid_ptr(r0_reg, true);
        m_inv.assign(r0_pack.shared_offset, 0);
        type_inv.assign_type(m_inv, r0_reg, T_SHARED);
    } else {
        // Non-lookup helpers return an arbitrary number in r0.
        m_inv.havoc(r0_pack.svalue);
        m_inv.havoc(r0_pack.uvalue);
        havoc_offsets(r0_reg);
        type_inv.assign_type(m_inv, r0_reg, T_NUM);
        // m_inv.add_constraint(r0_pack.value < 0); for INTEGER_OR_NO_RETURN_IF_SUCCEED.
    }
out:
    scratch_caller_saved_registers();
    if (call.reallocate_packet) {
        forget_packet_pointers();
    }
}
868

869
void EbpfTransformer::operator()(const CallLocal& call) {
    // Transformer for a call to a local (bpf-to-bpf) subprogram.
    using namespace dsl_syntax;
    if (m_inv.is_bottom()) {
        return;
    }
    // Preserve the caller's callee-saved registers under the new stack frame prefix.
    save_callee_saved_registers(call.stack_frame_prefix);

    // Carve out a fresh stack frame for the callee by moving r10 down.
    add(Reg{R10_STACK_POINTER}, -EBPF_SUBPROGRAM_STACK_SIZE, 64);
}

881
void EbpfTransformer::operator()(const Callx& callx) {
    // Transformer for an indirect helper call: the helper id comes from a
    // register, so it must be a known 32-bit constant before we can model it.
    using namespace dsl_syntax;
    if (m_inv.is_bottom()) {
        return;
    }

    const RegPack& reg = reg_pack(callx.func);
    const auto sn = m_inv.eval_interval(reg.svalue).singleton();
    if (!sn || !sn->fits<int32_t>()) {
        return;
    }
    // We can now process it as if the id was immediate.
    const int32_t imm = sn->cast_to<int32_t>();
    if (!thread_local_program_info->platform->is_helper_usable(imm)) {
        return;
    }
    (*this)(make_call(imm, *thread_local_program_info->platform));
}
902

903
void EbpfTransformer::do_load_mapfd(const Reg& dst_reg, const int mapfd, const bool maybe_null) {
12,518✔
904
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
12,518✔
905
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
12,518✔
906
    if (type.value_type == EbpfMapValueType::PROGRAM) {
12,518✔
907
        type_inv.assign_type(m_inv, dst_reg, T_MAP_PROGRAMS);
621✔
908
    } else {
909
        type_inv.assign_type(m_inv, dst_reg, T_MAP);
18,156✔
910
    }
911
    const RegPack& dst = reg_pack(dst_reg);
12,518✔
912
    m_inv.assign(dst.map_fd, mapfd);
12,518✔
913
    assign_valid_ptr(dst_reg, maybe_null);
12,518✔
914
}
12,518✔
915

916
void EbpfTransformer::operator()(const LoadMapFd& ins) {
    // A direct lddw of a map fd always yields a non-null map pointer.
    if (!m_inv.is_bottom()) {
        do_load_mapfd(ins.dst, ins.mapfd, false);
    }
}
922

923
void EbpfTransformer::do_load_map_address(const Reg& dst_reg, const int mapfd, const int32_t offset) {
10✔
924
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
10✔
925
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
10✔
926

927
    if (type.value_type == EbpfMapValueType::PROGRAM) {
10✔
UNCOV
928
        throw std::invalid_argument("Cannot load address of program map type - only data maps are supported");
×
929
    }
930

931
    // Set the shared region size and offset for the map.
932
    type_inv.assign_type(m_inv, dst_reg, T_SHARED);
10✔
933
    const RegPack& dst = reg_pack(dst_reg);
10✔
934
    m_inv.assign(dst.shared_offset, offset);
10✔
935
    m_inv.assign(dst.shared_region_size, desc.value_size);
10✔
936
    assign_valid_ptr(dst_reg, false);
10✔
937
}
10✔
938

939
void EbpfTransformer::operator()(const LoadMapAddress& ins) {
    // Transformer for loading the address of a map value (non-null by construction).
    if (!m_inv.is_bottom()) {
        do_load_map_address(ins.dst, ins.mapfd, ins.offset);
    }
}
945

946
void EbpfTransformer::assign_valid_ptr(const Reg& dst_reg, const bool maybe_null) {
20,908✔
947
    using namespace dsl_syntax;
10,454✔
948
    const RegPack& reg = reg_pack(dst_reg);
20,908✔
949
    m_inv.havoc(reg.svalue);
20,908✔
950
    m_inv.havoc(reg.uvalue);
20,908✔
951
    if (maybe_null) {
20,908✔
952
        m_inv.add_constraint(0 <= reg.svalue);
16,828✔
953
    } else {
954
        m_inv.add_constraint(0 < reg.svalue);
24,988✔
955
    }
956
    m_inv.add_constraint(reg.svalue <= PTR_MAX);
31,362✔
957
    m_inv.assign(reg.uvalue, reg.svalue);
20,908✔
958
}
20,908✔
959

960
// If nothing is known of the stack_numeric_size,
961
// try to recompute the stack_numeric_size.
962
void EbpfTransformer::recompute_stack_numeric_size(NumAbsDomain& inv, const Variable type_variable) const {
847,203✔
963
    const Variable stack_numeric_size_variable = Variable::kind_var(DataKind::stack_numeric_sizes, type_variable);
847,203✔
964

965
    if (!inv.eval_interval(stack_numeric_size_variable).is_top()) {
1,694,407✔
UNCOV
966
        return;
×
967
    }
968

969
    if (type_inv.has_type(inv, type_variable, T_STACK)) {
847,203✔
970
        const int numeric_size =
423,078✔
971
            stack.min_all_num_size(inv, Variable::kind_var(DataKind::stack_offsets, type_variable));
846,155✔
972
        if (numeric_size > 0) {
846,155✔
973
            inv.assign(stack_numeric_size_variable, numeric_size);
28,614✔
974
        }
975
    }
976
}
977

978
void EbpfTransformer::recompute_stack_numeric_size(NumAbsDomain& inv, const Reg& reg) const {
    // Convenience overload: recompute based on the register's type variable.
    const Variable type_variable = reg_pack(reg).type;
    recompute_stack_numeric_size(inv, type_variable);
}
981

982
// Transformer for dst += imm (also used for SUB via a negated imm): updates
// the numeric value with overflow semantics and, when dst is a pointer, keeps
// its offset and cached stack_numeric_size consistent.
void EbpfTransformer::add(const Reg& dst_reg, const int imm, const int finite_width) {
    const auto dst = reg_pack(dst_reg);
    m_inv->add_overflow(dst.svalue, dst.uvalue, imm, finite_width);
    if (const auto offset = dom.get_type_offset_variable(dst_reg)) {
        m_inv->add(*offset, imm);
        if (imm > 0) {
            // Since the start offset is increasing but
            // the end offset is not, the numeric size decreases.
            m_inv->sub(dst.stack_numeric_size, imm);
        } else if (imm < 0) {
            // Moving the start backwards may expose bytes of unknown numeric
            // status, so drop the cached size and recompute it below.
            m_inv.havoc(dst.stack_numeric_size);
        }
        recompute_stack_numeric_size(dom.m_inv, dst_reg);
    }
}
997

998
void EbpfTransformer::shl(const Reg& dst_reg, int imm, const int finite_width) {
5,178✔
999
    havoc_offsets(dst_reg);
5,178✔
1000

1001
    // The BPF ISA requires masking the imm.
1002
    imm &= finite_width - 1;
5,178✔
1003
    const RegPack dst = reg_pack(dst_reg);
5,178✔
1004
    if (m_inv.entail(type_is_number(dst))) {
7,767✔
1005
        m_inv->shl(dst.svalue, dst.uvalue, imm, finite_width);
5,176✔
1006
    } else {
1007
        m_inv.havoc(dst.svalue);
2✔
1008
        m_inv.havoc(dst.uvalue);
2✔
1009
    }
1010
}
5,178✔
1011

1012
void EbpfTransformer::lshr(const Reg& dst_reg, int imm, int finite_width) {
4,870✔
1013
    havoc_offsets(dst_reg);
4,870✔
1014

1015
    // The BPF ISA requires masking the imm.
1016
    imm &= finite_width - 1;
4,870✔
1017
    const RegPack dst = reg_pack(dst_reg);
4,870✔
1018
    if (m_inv.entail(type_is_number(dst))) {
7,305✔
1019
        m_inv->lshr(dst.svalue, dst.uvalue, imm, finite_width);
4,868✔
1020
    } else {
1021
        m_inv.havoc(dst.svalue);
2✔
1022
        m_inv.havoc(dst.uvalue);
2✔
1023
    }
1024
}
4,870✔
1025

1026
void EbpfTransformer::ashr(const Reg& dst_reg, const LinearExpression& right_svalue, const int finite_width) {
694✔
1027
    havoc_offsets(dst_reg);
694✔
1028

1029
    const RegPack dst = reg_pack(dst_reg);
694✔
1030
    if (m_inv.entail(type_is_number(dst))) {
1,041✔
1031
        m_inv->ashr(dst.svalue, dst.uvalue, right_svalue, finite_width);
692✔
1032
    } else {
1033
        m_inv.havoc(dst.svalue);
2✔
1034
        m_inv.havoc(dst.uvalue);
2✔
1035
    }
1036
}
694✔
1037

1038
void EbpfTransformer::sign_extend(const Reg& dst_reg, const LinearExpression& right_svalue, const int target_width,
2,234✔
1039
                                  const int source_width) {
1040
    havoc_offsets(dst_reg);
2,234✔
1041

1042
    type_inv.assign_type(dom.m_inv, dst_reg, T_NUM);
2,234✔
1043

1044
    const RegPack dst = reg_pack(dst_reg);
2,234✔
1045
    m_inv->sign_extend(dst.svalue, dst.uvalue, right_svalue, target_width, source_width);
2,234✔
1046
}
2,234✔
1047

1048
// Number of low bits that a MOVSX variant sign-extends from.
static int _movsx_bits(const Bin::Op op) {
    if (op == Bin::Op::MOVSX8) {
        return 8;
    }
    if (op == Bin::Op::MOVSX16) {
        return 16;
    }
    if (op == Bin::Op::MOVSX32) {
        return 32;
    }
    throw std::exception();
}
1056

1057
void EbpfTransformer::operator()(const Bin& bin) {
152,568✔
1058
    if (m_inv.is_bottom()) {
152,568✔
1059
        return;
2,960✔
1060
    }
1061
    using namespace dsl_syntax;
76,284✔
1062

1063
    auto dst = reg_pack(bin.dst);
152,568✔
1064
    int finite_width = bin.is64 ? 64 : 32;
152,568✔
1065

1066
    // TODO: Unusable states and values should be better handled.
1067
    //       Probably by propagating an error state.
1068
    if (type_inv.has_type(m_inv, bin.dst, T_UNINIT) &&
203,430✔
1069
        !std::set{Bin::Op::MOV, Bin::Op::MOVSX8, Bin::Op::MOVSX16, Bin::Op::MOVSX32}.contains(bin.op)) {
278,815✔
1070
        havoc_register(m_inv, bin.dst);
1,816✔
1071
        type_inv.havoc_type(m_inv, bin.dst);
1,816✔
1072
        return;
1,816✔
1073
    }
1074
    if (auto pimm = std::get_if<Imm>(&bin.v)) {
150,752✔
1075
        // dst += K
1076
        int64_t imm;
43,544✔
1077
        if (bin.is64) {
87,088✔
1078
            // Use the full signed value.
1079
            imm = to_signed(pimm->v);
63,824✔
1080
        } else {
1081
            // Use only the low 32 bits of the value.
1082
            imm = gsl::narrow_cast<int32_t>(pimm->v);
23,264✔
1083
            m_inv->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
23,264✔
1084
            // If this is a 32-bit operation and the destination is not a number, forget everything about the register.
1085
            if (!type_inv.has_type(m_inv, bin.dst, T_NUM)) {
23,264✔
1086
                havoc_register(m_inv, bin.dst);
742✔
1087
                havoc_offsets(bin.dst);
742✔
1088
                m_inv.havoc(dst.type);
742✔
1089
            }
1090
        }
1091
        switch (bin.op) {
87,088✔
1092
        case Bin::Op::MOV:
39,344✔
1093
            m_inv.assign(dst.svalue, imm);
39,344✔
1094
            m_inv.assign(dst.uvalue, imm);
39,344✔
1095
            m_inv->overflow_bounds(dst.uvalue, bin.is64 ? 64 : 32, false);
44,676✔
1096
            type_inv.assign_type(m_inv, bin.dst, T_NUM);
39,344✔
1097
            havoc_offsets(bin.dst);
39,344✔
1098
            break;
19,672✔
UNCOV
1099
        case Bin::Op::MOVSX8:
×
1100
        case Bin::Op::MOVSX16:
UNCOV
1101
        case Bin::Op::MOVSX32: CRAB_ERROR("Unsupported operation");
×
1102
        case Bin::Op::ADD:
24,928✔
1103
            if (imm == 0) {
24,928✔
1104
                return;
1,483✔
1105
            }
1106
            add(bin.dst, gsl::narrow<int>(imm), finite_width);
24,924✔
1107
            break;
12,462✔
1108
        case Bin::Op::SUB:
28✔
1109
            if (imm == 0) {
28✔
1110
                return;
1✔
1111
            }
1112
            add(bin.dst, gsl::narrow<int>(-imm), finite_width);
26✔
1113
            break;
13✔
1114
        case Bin::Op::MUL:
102✔
1115
            m_inv->mul(dst.svalue, dst.uvalue, imm, finite_width);
102✔
1116
            havoc_offsets(bin.dst);
102✔
1117
            break;
51✔
1118
        case Bin::Op::UDIV:
2,182✔
1119
            m_inv->udiv(dst.svalue, dst.uvalue, imm, finite_width);
2,182✔
1120
            havoc_offsets(bin.dst);
2,182✔
1121
            break;
1,091✔
1122
        case Bin::Op::UMOD:
24✔
1123
            m_inv->urem(dst.svalue, dst.uvalue, imm, finite_width);
24✔
1124
            havoc_offsets(bin.dst);
24✔
1125
            break;
12✔
1126
        case Bin::Op::SDIV:
18✔
1127
            m_inv->sdiv(dst.svalue, dst.uvalue, imm, finite_width);
18✔
1128
            havoc_offsets(bin.dst);
18✔
1129
            break;
9✔
1130
        case Bin::Op::SMOD:
34✔
1131
            m_inv->srem(dst.svalue, dst.uvalue, imm, finite_width);
34✔
1132
            havoc_offsets(bin.dst);
34✔
1133
            break;
17✔
1134
        case Bin::Op::OR:
2,650✔
1135
            m_inv->bitwise_or(dst.svalue, dst.uvalue, imm);
2,650✔
1136
            havoc_offsets(bin.dst);
2,650✔
1137
            break;
1,325✔
1138
        case Bin::Op::AND:
6,782✔
1139
            // FIX: what to do with ptr&-8 as in counter/simple_loop_unrolled?
1140
            m_inv->bitwise_and(dst.svalue, dst.uvalue, imm);
6,782✔
1141
            if (gsl::narrow<int32_t>(imm) > 0) {
6,782✔
1142
                // AND with immediate is only a 32-bit operation so svalue and uvalue are the same.
1143
                m_inv.add_constraint(dst.svalue <= imm);
9,369✔
1144
                m_inv.add_constraint(dst.uvalue <= imm);
9,369✔
1145
                m_inv.add_constraint(0 <= dst.svalue);
12,492✔
1146
                m_inv.add_constraint(0 <= dst.uvalue);
12,492✔
1147
            }
1148
            havoc_offsets(bin.dst);
6,782✔
1149
            break;
3,391✔
1150
        case Bin::Op::LSH: shl(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
5,132✔
1151
        case Bin::Op::RSH: lshr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
4,836✔
1152
        case Bin::Op::ARSH: ashr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
660✔
1153
        case Bin::Op::XOR:
368✔
1154
            m_inv->bitwise_xor(dst.svalue, dst.uvalue, imm);
368✔
1155
            havoc_offsets(bin.dst);
368✔
1156
            break;
184✔
1157
        }
1158
    } else {
1159
        // dst op= src
1160
        auto src_reg = std::get<Reg>(bin.v);
63,664✔
1161
        auto src = reg_pack(src_reg);
63,664✔
1162
        if (type_inv.has_type(m_inv, src_reg, T_UNINIT)) {
63,664✔
1163
            havoc_register(m_inv, bin.dst);
978✔
1164
            type_inv.havoc_type(m_inv, bin.dst);
978✔
1165
            return;
1,627✔
1166
        }
1167
        switch (bin.op) {
62,686✔
1168
        case Bin::Op::ADD: {
4,294✔
1169
            if (type_inv.same_type(m_inv, bin.dst, std::get<Reg>(bin.v))) {
4,294✔
1170
                // both must have been checked to be numbers
1171
                m_inv->add_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
3,622✔
1172
            } else {
1173
                // Here we're not sure that lhs and rhs are the same type; they might be.
1174
                // But previous assertions should fail unless we know that exactly one of lhs or rhs is a pointer.
1175
                m_inv = type_inv.join_over_types(m_inv, bin.dst, [&](NumAbsDomain& inv, const TypeEncoding dst_type) {
1,344✔
1176
                    inv = type_inv.join_over_types(inv, src_reg, [&](NumAbsDomain& inv, const TypeEncoding src_type) {
1,352✔
1177
                        if (dst_type == T_NUM && src_type != T_NUM) {
682✔
1178
                            // num += ptr
1179
                            type_inv.assign_type(inv, bin.dst, src_type);
10✔
1180
                            if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, src_type)) {
10✔
1181
                                inv->apply(ArithBinOp::ADD, dst_offset.value(), dst.svalue,
15✔
1182
                                           dom.get_type_offset_variable(src_reg, src_type).value());
20✔
1183
                            }
1184
                            if (src_type == T_SHARED) {
10✔
UNCOV
1185
                                inv.assign(dst.shared_region_size, src.shared_region_size);
×
1186
                            }
1187
                        } else if (dst_type != T_NUM && src_type == T_NUM) {
677✔
1188
                            // ptr += num
1189
                            type_inv.assign_type(inv, bin.dst, dst_type);
668✔
1190
                            if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, dst_type)) {
668✔
1191
                                inv->apply(ArithBinOp::ADD, dst_offset.value(), dst_offset.value(), src.svalue);
668✔
1192
                                if (dst_type == T_STACK) {
668✔
1193
                                    // Reduce the numeric size.
1194
                                    using namespace dsl_syntax;
8✔
1195
                                    if (inv.intersect(src.svalue < 0)) {
24✔
1196
                                        inv.havoc(dst.stack_numeric_size);
2✔
1197
                                        recompute_stack_numeric_size(inv, dst.type);
2✔
1198
                                    } else {
1199
                                        inv->apply_signed(ArithBinOp::SUB, dst.stack_numeric_size,
28✔
1200
                                                          dst.stack_numeric_size, dst.stack_numeric_size, src.svalue,
14✔
1201
                                                          0);
1202
                                    }
1203
                                }
1204
                            }
1205
                        } else if (dst_type == T_NUM && src_type == T_NUM) {
672✔
1206
                            // dst and src don't necessarily have the same type, but among the possibilities
1207
                            // enumerated is the case where they are both numbers.
1208
                            inv->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
2✔
1209
                                              finite_width);
1210
                        } else {
1211
                            // We ignore the cases here that do not match the assumption described
1212
                            // above.  Joining bottom with another results will leave the other
1213
                            // results unchanged.
1214
                            inv.set_to_bottom();
2✔
1215
                        }
1216
                    });
1,358✔
1217
                });
1,348✔
1218
                // careful: change dst.value only after dealing with offset
1219
                m_inv->apply_signed(ArithBinOp::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue, finite_width);
672✔
1220
            }
1221
            break;
2,147✔
1222
        }
1223
        case Bin::Op::SUB: {
780✔
1224
            if (type_inv.same_type(m_inv, bin.dst, std::get<Reg>(bin.v))) {
780✔
1225
                // src and dest have the same type.
1226
                m_inv = type_inv.join_over_types(m_inv, bin.dst, [&](NumAbsDomain& inv, const TypeEncoding type) {
1,544✔
1227
                    switch (type) {
776✔
1228
                    case T_NUM:
504✔
1229
                        // This is: sub_overflow(inv, dst.value, src.value, finite_width);
1230
                        inv->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
504✔
1231
                                          finite_width);
1232
                        type_inv.assign_type(inv, bin.dst, T_NUM);
504✔
1233
                        prevail::havoc_offsets(inv, bin.dst);
504✔
1234
                        break;
504✔
1235
                    default:
272✔
1236
                        // ptr -= ptr
1237
                        // Assertions should make sure we only perform this on non-shared pointers.
1238
                        if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, type)) {
272✔
1239
                            inv->apply_signed(ArithBinOp::SUB, dst.svalue, dst.uvalue, dst_offset.value(),
544✔
1240
                                              dom.get_type_offset_variable(src_reg, type).value(), finite_width);
408✔
1241
                            inv.havoc(dst_offset.value());
272✔
1242
                        }
1243
                        prevail::havoc_offsets(inv, bin.dst);
272✔
1244
                        type_inv.assign_type(inv, bin.dst, T_NUM);
272✔
1245
                        break;
272✔
1246
                    }
1247
                });
1,548✔
1248
            } else {
1249
                // We're not sure that lhs and rhs are the same type.
1250
                // Either they're different, or at least one is not a singleton.
1251
                if (type_inv.get_type(m_inv, std::get<Reg>(bin.v)) != T_NUM) {
8✔
1252
                    type_inv.havoc_type(m_inv, bin.dst);
4✔
1253
                    m_inv.havoc(dst.svalue);
4✔
1254
                    m_inv.havoc(dst.uvalue);
4✔
1255
                    havoc_offsets(bin.dst);
4✔
1256
                } else {
1257
                    m_inv->sub_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
4✔
1258
                    if (auto dst_offset = dom.get_type_offset_variable(bin.dst)) {
4✔
1259
                        m_inv->sub(dst_offset.value(), src.svalue);
4✔
1260
                        if (type_inv.has_type(m_inv, dst.type, T_STACK)) {
4✔
1261
                            // Reduce the numeric size.
1262
                            using namespace dsl_syntax;
2✔
1263
                            if (m_inv.intersect(src.svalue > 0)) {
8✔
1264
                                m_inv.havoc(dst.stack_numeric_size);
4✔
1265
                                recompute_stack_numeric_size(m_inv, dst.type);
4✔
1266
                            } else {
UNCOV
1267
                                m_inv->apply(ArithBinOp::ADD, dst.stack_numeric_size, dst.stack_numeric_size,
×
1268
                                             src.svalue);
1269
                            }
1270
                        }
1271
                    }
1272
                }
1273
            }
1274
            break;
390✔
1275
        }
1276
        case Bin::Op::MUL:
72✔
1277
            m_inv->mul(dst.svalue, dst.uvalue, src.svalue, finite_width);
72✔
1278
            havoc_offsets(bin.dst);
72✔
1279
            break;
36✔
1280
        case Bin::Op::UDIV:
104✔
1281
            m_inv->udiv(dst.svalue, dst.uvalue, src.uvalue, finite_width);
104✔
1282
            havoc_offsets(bin.dst);
104✔
1283
            break;
52✔
1284
        case Bin::Op::UMOD:
34✔
1285
            m_inv->urem(dst.svalue, dst.uvalue, src.uvalue, finite_width);
34✔
1286
            havoc_offsets(bin.dst);
34✔
1287
            break;
17✔
1288
        case Bin::Op::SDIV:
32✔
1289
            m_inv->sdiv(dst.svalue, dst.uvalue, src.svalue, finite_width);
32✔
1290
            havoc_offsets(bin.dst);
32✔
1291
            break;
16✔
1292
        case Bin::Op::SMOD:
46✔
1293
            m_inv->srem(dst.svalue, dst.uvalue, src.svalue, finite_width);
46✔
1294
            havoc_offsets(bin.dst);
46✔
1295
            break;
23✔
1296
        case Bin::Op::OR:
5,694✔
1297
            m_inv->bitwise_or(dst.svalue, dst.uvalue, src.uvalue, finite_width);
5,694✔
1298
            havoc_offsets(bin.dst);
5,694✔
1299
            break;
2,847✔
1300
        case Bin::Op::AND:
476✔
1301
            m_inv->bitwise_and(dst.svalue, dst.uvalue, src.uvalue, finite_width);
476✔
1302
            havoc_offsets(bin.dst);
476✔
1303
            break;
238✔
1304
        case Bin::Op::LSH:
588✔
1305
            if (m_inv.entail(type_is_number(src_reg))) {
882✔
1306
                auto src_interval = m_inv.eval_interval(src.uvalue);
588✔
1307
                if (std::optional<Number> sn = src_interval.singleton()) {
588✔
1308
                    // truncate to uint64?
1309
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
46✔
1310
                    if (imm <= std::numeric_limits<int32_t>::max()) {
46✔
1311
                        if (!bin.is64) {
46✔
1312
                            // Use only the low 32 bits of the value.
1313
                            m_inv->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
39✔
1314
                        }
1315
                        shl(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
46✔
1316
                        break;
46✔
1317
                    }
1318
                }
588✔
1319
            }
588✔
1320
            m_inv->shl_overflow(dst.svalue, dst.uvalue, src.uvalue);
542✔
1321
            havoc_offsets(bin.dst);
542✔
1322
            break;
271✔
1323
        case Bin::Op::RSH:
78✔
1324
            if (m_inv.entail(type_is_number(src_reg))) {
117✔
1325
                auto src_interval = m_inv.eval_interval(src.uvalue);
78✔
1326
                if (std::optional<Number> sn = src_interval.singleton()) {
78✔
1327
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
34✔
1328
                    if (imm <= std::numeric_limits<int32_t>::max()) {
34✔
1329
                        if (!bin.is64) {
34✔
1330
                            // Use only the low 32 bits of the value.
1331
                            m_inv->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
24✔
1332
                        }
1333
                        lshr(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
34✔
1334
                        break;
34✔
1335
                    }
1336
                }
78✔
1337
            }
78✔
1338
            m_inv.havoc(dst.svalue);
44✔
1339
            m_inv.havoc(dst.uvalue);
44✔
1340
            havoc_offsets(bin.dst);
44✔
1341
            break;
22✔
1342
        case Bin::Op::ARSH:
34✔
1343
            if (m_inv.entail(type_is_number(src_reg))) {
51✔
1344
                ashr(bin.dst, src.svalue, finite_width);
34✔
1345
                break;
34✔
1346
            }
UNCOV
1347
            m_inv.havoc(dst.svalue);
×
UNCOV
1348
            m_inv.havoc(dst.uvalue);
×
UNCOV
1349
            havoc_offsets(bin.dst);
×
1350
            break;
1351
        case Bin::Op::XOR:
474✔
1352
            m_inv->bitwise_xor(dst.svalue, dst.uvalue, src.uvalue, finite_width);
474✔
1353
            havoc_offsets(bin.dst);
474✔
1354
            break;
237✔
1355
        case Bin::Op::MOVSX8:
2,238✔
1356
        case Bin::Op::MOVSX16:
1,119✔
1357
        case Bin::Op::MOVSX32: {
1,119✔
1358
            const int source_width = _movsx_bits(bin.op);
2,238✔
1359
            // Keep relational information if operation is a no-op.
1360
            if (dst.svalue == src.svalue && m_inv.eval_interval(dst.svalue) <= Interval::signed_int(source_width)) {
4,436✔
1361
                return;
2✔
1362
            }
1363
            if (m_inv.entail(type_is_number(src_reg))) {
3,351✔
1364
                sign_extend(bin.dst, src.svalue, finite_width, source_width);
2,234✔
1365
                break;
2,234✔
1366
            }
UNCOV
1367
            m_inv.havoc(dst.svalue);
×
UNCOV
1368
            m_inv.havoc(dst.uvalue);
×
1369
            havoc_offsets(bin.dst);
31,263✔
1370
            break;
1371
        }
1372
        case Bin::Op::MOV:
47,742✔
1373
            // Keep relational information if operation is a no-op.
1374
            if (dst.svalue == src.svalue &&
48,488✔
1375
                m_inv.eval_interval(dst.uvalue) <= Interval::unsigned_int(bin.is64 ? 64 : 32)) {
50,721✔
1376
                return;
78✔
1377
            }
1378
            m_inv.assign(dst.svalue, src.svalue);
47,586✔
1379
            m_inv.assign(dst.uvalue, src.uvalue);
47,586✔
1380
            havoc_offsets(bin.dst);
47,586✔
1381
            m_inv = type_inv.join_over_types(m_inv, src_reg, [&](NumAbsDomain& inv, const TypeEncoding type) {
95,172✔
1382
                switch (type) {
47,608✔
1383
                case T_CTX:
5,066✔
1384
                    if (bin.is64) {
5,066✔
1385
                        inv.assign(dst.type, type);
5,062✔
1386
                        inv.assign(dst.ctx_offset, src.ctx_offset);
7,593✔
1387
                    }
1388
                    break;
2,533✔
1389
                case T_MAP:
302✔
1390
                case T_MAP_PROGRAMS:
151✔
1391
                    if (bin.is64) {
302✔
1392
                        inv.assign(dst.type, type);
298✔
1393
                        inv.assign(dst.map_fd, src.map_fd);
447✔
1394
                    }
1395
                    break;
151✔
1396
                case T_PACKET:
1,026✔
1397
                    if (bin.is64) {
1,026✔
1398
                        inv.assign(dst.type, type);
1,024✔
1399
                        inv.assign(dst.packet_offset, src.packet_offset);
1,536✔
1400
                    }
1401
                    break;
513✔
1402
                case T_SHARED:
908✔
1403
                    if (bin.is64) {
908✔
1404
                        inv.assign(dst.type, type);
906✔
1405
                        inv.assign(dst.shared_region_size, src.shared_region_size);
906✔
1406
                        inv.assign(dst.shared_offset, src.shared_offset);
1,359✔
1407
                    }
1408
                    break;
454✔
1409
                case T_STACK:
19,876✔
1410
                    if (bin.is64) {
19,876✔
1411
                        inv.assign(dst.type, type);
19,874✔
1412
                        inv.assign(dst.stack_offset, src.stack_offset);
19,874✔
1413
                        inv.assign(dst.stack_numeric_size, src.stack_numeric_size);
29,811✔
1414
                    }
1415
                    break;
9,938✔
1416
                default: inv.assign(dst.type, type); break;
30,645✔
1417
                }
1418
            });
47,586✔
1419
            if (bin.is64) {
47,586✔
1420
                // Add dst.type=src.type invariant.
1421
                if (bin.dst.v != std::get<Reg>(bin.v).v || type_inv.get_type(m_inv, dst.type) == T_UNINIT) {
36,521✔
1422
                    // Only forget the destination type if we're copying from a different register,
1423
                    // or from the same uninitialized register.
1424
                    m_inv.havoc(dst.type);
36,518✔
1425
                }
1426
                type_inv.assign_type(m_inv, bin.dst, std::get<Reg>(bin.v));
36,520✔
1427
            }
1428
            break;
23,793✔
1429
        }
1430
    }
1431
    if (!bin.is64) {
149,608✔
1432
        m_inv->bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
58,686✔
1433
    }
1434
}
1435

1436
void EbpfTransformer::initialize_loop_counter(const Label& label) {
32✔
1437
    m_inv.assign(Variable::loop_counter(to_string(label)), 0);
48✔
1438
}
32✔
1439

1440
void EbpfTransformer::operator()(const IncrementLoopCounter& ins) {
164✔
1441
    if (m_inv.is_bottom()) {
164✔
UNCOV
1442
        return;
×
1443
    }
1444
    const auto counter = Variable::loop_counter(to_string(ins.name));
164✔
1445
    m_inv->add(counter, 1);
246✔
1446
}
1447

1448
void ebpf_domain_initialize_loop_counter(EbpfDomain& dom, const Label& label) {
32✔
1449
    EbpfTransformer{dom}.initialize_loop_counter(label);
32✔
1450
}
32✔
1451

1452
} // namespace prevail
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc