vbpf / ebpf-verifier / 12951530094

24 Jan 2025 02:26PM UTC coverage: 88.133% (-0.04%) from 88.169%

Build 12951530094 (push, github, web-flow)

add support for 64bit immediate with type 2 (#820)

Add support for 64bit immediate with type 2

From the ISA RFC:
5.4.  64-bit immediate instructions

   Instructions with the IMM 'mode' modifier use the wide instruction
   encoding defined in Instruction encoding (Section 3), and use the
   'src_reg' field of the basic instruction to hold an opcode subtype.

   The following table defines a set of {IMM, DW, LD} instructions with
   opcode subtypes in the 'src_reg' field, using new terms such as "map"
   defined further below:

    +=========+================================+==========+==========+
    | src_reg | pseudocode                     | imm type | dst type |
    +=========+================================+==========+==========+
    | 0x0     | dst = (next_imm << 32) | imm   | integer  | integer  |
    +---------+--------------------------------+----------+----------+
    | 0x1     | dst = map_by_fd(imm)           | map fd   | map      |
    +---------+--------------------------------+----------+----------+
    | 0x2     | dst = map_val(map_by_fd(imm))  | map fd   | data     |
    |         | + next_imm                     |          | address  |
    +---------+--------------------------------+----------+----------+
    | 0x3     | dst = var_addr(imm)            | variable | data     |
    |         |                                | id       | address  |
    +---------+--------------------------------+----------+----------+
    | 0x4     | dst = code_addr(imm)           | integer  | code     |
    |         |                                |          | address  |
    +---------+--------------------------------+----------+----------+
    | 0x5     | dst = map_by_idx(imm)          | map      | map      |
    |         |                                | index    |          |
    +---------+--------------------------------+----------+----------+
    | 0x6     | dst = map_val(map_by_idx(imm)) | map      | data     |
    |         | + next_imm    ... (continued)
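
To make the quoted table concrete, here is a minimal C++ sketch (illustration
only, not the verifier's or the kernel's own decoder; the slot layout is the
commonly assumed bpf_insn shape and the sample values are invented) of how the
two 8-byte slots of an lddw instruction combine for subtype 0x0, and of what
subtype 0x2, the one this change adds support for, makes dst mean.

    // Sketch of the two-slot "lddw" (64-bit immediate) encoding.
    #include <cstdint>
    #include <cstdio>

    struct bpf_insn {      // one 8-byte instruction slot (assumed layout)
        uint8_t opcode;    // 0x18 == BPF_LD | BPF_IMM | BPF_DW for lddw
        uint8_t regs;      // low nibble: dst_reg; high nibble: src_reg (the opcode subtype)
        int16_t offset;
        int32_t imm;
    };

    // Subtype 0x0: dst = (next_imm << 32) | imm, i.e. a plain 64-bit constant.
    uint64_t lddw_constant(const bpf_insn& lo, const bpf_insn& hi) {
        const uint32_t imm = static_cast<uint32_t>(lo.imm);
        const uint32_t next_imm = static_cast<uint32_t>(hi.imm);
        return (static_cast<uint64_t>(next_imm) << 32) | imm;
    }

    int main() {
        // "r1 = 0x123456789abcdef0" encoded as two slots with src_reg == 0x0.
        const bpf_insn lo{0x18, 0x01, 0, static_cast<int32_t>(0x9abcdef0u)};
        const bpf_insn hi{0x00, 0x00, 0, 0x12345678};
        std::printf("dst = %#llx\n",
                    static_cast<unsigned long long>(lddw_constant(lo, hi)));
        // Subtype 0x2 instead makes dst an address rather than a number:
        //     dst = map_val(map_by_fd(imm)) + next_imm
        // i.e. a pointer into the value of the map whose fd is imm, displaced by
        // next_imm bytes, which the verifier then tracks as a data address.
        return 0;
    }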

81 of 92 new or added lines in 12 files covered. (88.04%)

7 existing lines in 1 file now uncovered.

8533 of 9682 relevant lines covered (88.13%)

7382636.56 hits per line

Source File: /src/crab/ebpf_transformer.cpp (96.2% covered)
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3

4
// This file is eBPF-specific, not derived from CRAB.
5

6
#include <bitset>
7
#include <optional>
8
#include <utility>
9
#include <vector>
10

11
#include "boost/endian/conversion.hpp"
12

13
#include "asm_unmarshal.hpp"
14
#include "config.hpp"
15
#include "crab/array_domain.hpp"
16
#include "crab/ebpf_domain.hpp"
17
#include "crab_utils/num_safety.hpp"
18
#include "dsl_syntax.hpp"
19
#include "platform.hpp"
20
#include "string_constraints.hpp"
21

22
using crab::domains::NumAbsDomain;
23
namespace crab {
24

25
class ebpf_transformer final {
26
    ebpf_domain_t& dom;
27
    // shorthands:
28
    NumAbsDomain& m_inv;
29
    domains::array_domain_t& stack;
30
    TypeDomain& type_inv;
31

32
  public:
33
    explicit ebpf_transformer(ebpf_domain_t& dom)
247,816✔
34
        : dom(dom), m_inv(dom.m_inv), stack(dom.stack), type_inv(dom.type_inv) {}
247,816✔
35

36
    // abstract transformers
37
    void operator()(const Assume&);
38
    void operator()(const Atomic&);
39
    void operator()(const Bin&);
40
    void operator()(const Call&);
41
    void operator()(const CallLocal&);
42
    void operator()(const Callx&);
43
    void operator()(const Exit&);
44
    void operator()(const IncrementLoopCounter&);
45
    void operator()(const Jmp&) const;
46
    void operator()(const LoadMapFd&);
47
    void operator()(const LoadMapAddress&);
48
    void operator()(const Mem&);
49
    void operator()(const Packet&);
50
    void operator()(const Un&);
51
    void operator()(const Undefined&);
52

53
    void initialize_loop_counter(const label_t& label);
54

55
    static ebpf_domain_t setup_entry(bool init_r1);
56

57
  private:
58
    void assign(variable_t lhs, variable_t rhs);
59
    void assign(variable_t x, const linear_expression_t& e);
60
    void assign(variable_t x, int64_t e);
61

62
    void apply(arith_binop_t op, variable_t x, variable_t y, const number_t& z, int finite_width);
63
    void apply(arith_binop_t op, variable_t x, variable_t y, variable_t z, int finite_width);
64
    void apply(bitwise_binop_t op, variable_t x, variable_t y, variable_t z, int finite_width);
65
    void apply(bitwise_binop_t op, variable_t x, variable_t y, const number_t& k, int finite_width);
66
    void apply(binop_t op, variable_t x, variable_t y, const number_t& z, int finite_width);
67
    void apply(binop_t op, variable_t x, variable_t y, variable_t z, int finite_width);
68

69
    void add(const Reg& reg, int imm, int finite_width);
70
    void add(variable_t lhs, variable_t op2);
71
    void add(variable_t lhs, const number_t& op2);
72
    void sub(variable_t lhs, variable_t op2);
73
    void sub(variable_t lhs, const number_t& op2);
74
    void add_overflow(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
75
    void add_overflow(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
76
    void sub_overflow(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
77
    void sub_overflow(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
78
    void neg(variable_t lhss, variable_t lhsu, int finite_width);
79
    void mul(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
80
    void mul(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
81
    void sdiv(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
82
    void sdiv(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
83
    void udiv(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
84
    void udiv(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
85
    void srem(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
86
    void srem(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
87
    void urem(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
88
    void urem(variable_t lhss, variable_t lhsu, const number_t& op2, int finite_width);
89

90
    void bitwise_and(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
91
    void bitwise_and(variable_t lhss, variable_t lhsu, const number_t& op2);
92
    void bitwise_or(variable_t lhss, variable_t lhsu, variable_t op2, int finite_width);
93
    void bitwise_or(variable_t lhss, variable_t lhsu, const number_t& op2);
94
    void bitwise_xor(variable_t lhsss, variable_t lhsu, variable_t op2, int finite_width);
95
    void bitwise_xor(variable_t lhss, variable_t lhsu, const number_t& op2);
96
    void shl(const Reg& reg, int imm, int finite_width);
97
    void shl_overflow(variable_t lhss, variable_t lhsu, variable_t op2);
98
    void shl_overflow(variable_t lhss, variable_t lhsu, const number_t& op2);
99
    void lshr(const Reg& reg, int imm, int finite_width);
100
    void ashr(const Reg& dst_reg, const linear_expression_t& right_svalue, int finite_width);
101
    void sign_extend(const Reg& dst_reg, const linear_expression_t& right_svalue, int finite_width, Bin::Op op);
102

103
    void assume(const linear_constraint_t& cst);
104

105
    /// Forget everything we know about the value of a variable.
106
    void havoc(variable_t v);
107

108
    /// Forget everything about all offset variables for a given register.
109
    void havoc_offsets(const Reg& reg);
110

111
    void scratch_caller_saved_registers();
112
    void save_callee_saved_registers(const std::string& prefix);
113
    void restore_callee_saved_registers(const std::string& prefix);
114
    void havoc_subprogram_stack(const std::string& prefix);
115
    void forget_packet_pointers();
116
    void do_load_mapfd(const Reg& dst_reg, int mapfd, bool maybe_null);
117
    void do_load_map_address(const Reg& dst_reg, const int mapfd, int32_t offset);
118

119
    void assign_valid_ptr(const Reg& dst_reg, bool maybe_null);
120

121
    void recompute_stack_numeric_size(NumAbsDomain& inv, const Reg& reg) const;
122
    void recompute_stack_numeric_size(NumAbsDomain& inv, variable_t type_variable) const;
123
    void do_load_stack(NumAbsDomain& inv, const Reg& target_reg, const linear_expression_t& addr, int width,
124
                       const Reg& src_reg);
125
    void do_load_ctx(NumAbsDomain& inv, const Reg& target_reg, const linear_expression_t& addr_vague, int width);
126
    void do_load_packet_or_shared(NumAbsDomain& inv, const Reg& target_reg, const linear_expression_t& addr, int width);
127
    void do_load(const Mem& b, const Reg& target_reg);
128

129
    void do_store_stack(NumAbsDomain& inv, const linear_expression_t& addr, int width,
130
                        const linear_expression_t& val_type, const linear_expression_t& val_svalue,
131
                        const linear_expression_t& val_uvalue, const std::optional<reg_pack_t>& opt_val_reg);
132

133
    void do_mem_store(const Mem& b, const linear_expression_t& val_type, const linear_expression_t& val_svalue,
134
                      const linear_expression_t& val_uvalue, const std::optional<reg_pack_t>& opt_val_reg);
135
}; // end ebpf_transformer
136

137
void ebpf_domain_transform(ebpf_domain_t& inv, const Instruction& ins) {
249,842✔
138
    if (inv.is_bottom()) {
249,842✔
139
        return;
1,029✔
140
    }
141
    std::visit(ebpf_transformer{inv}, ins);
247,784✔
142
    if (inv.is_bottom() && !std::holds_alternative<Assume>(ins)) {
247,782✔
143
        // Fail: raise an exception to stop the analysis.
144
        std::stringstream msg;
36✔
145
        msg << "Bug! pre-invariant:\n"
36✔
146
            << inv << "\n followed by instruction: " << ins << "\n"
36✔
147
            << "leads to bottom";
36✔
148
        throw std::logic_error(msg.str());
54✔
149
    }
36✔
150
}
151

152
/** Linear constraint for a pointer comparison.
153
 */
154
static linear_constraint_t assume_cst_offsets_reg(const Condition::Op op, const variable_t dst_offset,
1,620✔
155
                                                  const variable_t src_offset) {
156
    using namespace crab::dsl_syntax;
810✔
157
    using Op = Condition::Op;
810✔
158
    switch (op) {
1,620✔
159
    case Op::EQ: return eq(dst_offset, src_offset);
6✔
160
    case Op::NE: return neq(dst_offset, src_offset);
12✔
161
    case Op::GE: return dst_offset >= src_offset;
6✔
162
    case Op::SGE: return dst_offset >= src_offset; // pointer comparison is unsigned
×
163
    case Op::LE: return dst_offset <= src_offset;
796✔
164
    case Op::SLE: return dst_offset <= src_offset; // pointer comparison is unsigned
×
165
    case Op::GT: return dst_offset > src_offset;
1,194✔
166
    case Op::SGT: return dst_offset > src_offset; // pointer comparison is unsigned
×
167
    case Op::SLT: return src_offset > dst_offset;
×
168
    // Note: reverse the test as a workaround strange lookup:
169
    case Op::LT: return src_offset > dst_offset; // FIX unsigned
9✔
170
    default: return dst_offset - dst_offset == 0;
×
171
    }
172
}
173

174
static std::vector<linear_constraint_t> assume_bit_cst_interval(const NumAbsDomain& inv, Condition::Op op, bool is64,
52✔
175
                                                                variable_t dst_uvalue, interval_t src_interval) {
176
    using namespace crab::dsl_syntax;
26✔
177
    using Op = Condition::Op;
26✔
178

179
    auto dst_interval = inv.eval_interval(dst_uvalue);
52✔
180
    std::optional<number_t> dst_n = dst_interval.singleton();
52✔
181
    if (!dst_n || !dst_n.value().fits_cast_to<int64_t>()) {
52✔
182
        return {};
8✔
183
    }
184

185
    std::optional<number_t> src_n = src_interval.singleton();
44✔
186
    if (!src_n || !src_n->fits_cast_to<int64_t>()) {
44✔
187
        return {};
×
188
    }
189
    uint64_t src_int_value = src_n.value().cast_to<uint64_t>();
44✔
190
    if (!is64) {
44✔
191
        src_int_value = gsl::narrow_cast<uint32_t>(src_int_value);
20✔
192
    }
193

194
    bool result;
22✔
195
    switch (op) {
44✔
196
    case Op::SET: result = (dst_n.value().cast_to<uint64_t>() & src_int_value) != 0; break;
22✔
197
    case Op::NSET: result = (dst_n.value().cast_to<uint64_t>() & src_int_value) == 0; break;
22✔
198
    default: throw std::exception();
×
199
    }
200

201
    return {result ? linear_constraint_t::true_const() : linear_constraint_t::false_const()};
132✔
202
}
144✔
203

204
static std::vector<linear_constraint_t> assume_signed_64bit_eq(const NumAbsDomain& inv, const variable_t left_svalue,
8,350✔
205
                                                               const variable_t left_uvalue,
206
                                                               const interval_t& right_interval,
207
                                                               const linear_expression_t& right_svalue,
208
                                                               const linear_expression_t& right_uvalue) {
209
    using namespace crab::dsl_syntax;
4,175✔
210
    if (right_interval <= interval_t::nonnegative(64) && !right_interval.is_singleton()) {
16,696✔
211
        return {(left_svalue == right_svalue), (left_uvalue == right_uvalue), eq(left_svalue, left_uvalue)};
×
212
    } else {
213
        return {(left_svalue == right_svalue), (left_uvalue == right_uvalue)};
29,225✔
214
    }
215
}
16,700✔
216

217
static std::vector<linear_constraint_t> assume_signed_32bit_eq(const NumAbsDomain& inv, const variable_t left_svalue,
4,124✔
218
                                                               const variable_t left_uvalue,
219
                                                               const interval_t& right_interval) {
220
    using namespace crab::dsl_syntax;
2,062✔
221

222
    if (const auto rn = right_interval.singleton()) {
4,124✔
223
        const auto left_svalue_interval = inv.eval_interval(left_svalue);
3,962✔
224
        if (auto size = left_svalue_interval.finite_size()) {
3,962✔
225
            // Find the lowest 64-bit svalue whose low 32 bits match the singleton.
226

227
            // Get lower bound as a 64-bit value.
228
            int64_t lb = left_svalue_interval.lb().number()->cast_to<int64_t>();
5,656✔
229

230
            // Use the high 32-bits from the left lower bound and the low 32-bits from the right singleton.
231
            // The result might be lower than the lower bound.
232
            int64_t lb_match = (lb & 0xFFFFFFFF00000000) | (rn->cast_to<int64_t>() & 0xFFFFFFFF);
2,828✔
233
            if (lb_match < lb) {
2,828✔
234
                // The result is lower than the left interval, so try the next higher matching 64-bit value.
235
                // It's ok if this goes higher than the left upper bound.
236
                lb_match += 0x100000000;
1,546✔
237
            }
238

239
            // Find the highest 64-bit svalue whose low 32 bits match the singleton.
240

241
            // Get upper bound as a 64-bit value.
242
            const int64_t ub = left_svalue_interval.ub().number()->cast_to<int64_t>();
5,656✔
243

244
            // Use the high 32-bits from the left upper bound and the low 32-bits from the right singleton.
245
            // The result might be higher than the upper bound.
246
            int64_t ub_match = (ub & 0xFFFFFFFF00000000) | (rn->cast_to<int64_t>() & 0xFFFFFFFF);
2,828✔
247
            if (ub_match > ub) {
2,828✔
248
                // The result is higher than the left interval, so try the next lower matching 64-bit value.
249
                // It's ok if this goes lower than the left lower bound.
250
                ub_match -= 0x100000000;
1,429✔
251
            }
252

253
            if (to_unsigned(lb_match) <= to_unsigned(ub_match)) {
2,828✔
254
                // The interval is also valid when cast to a uvalue, meaning
255
                // both bounds are positive or both are negative.
256
                return {left_svalue >= lb_match, left_svalue <= ub_match, left_uvalue >= to_unsigned(lb_match),
1,289✔
257
                        left_uvalue <= to_unsigned(ub_match)};
14,179✔
258
            } else {
259
                // The interval can only be represented as an svalue.
260
                return {left_svalue >= lb_match, left_svalue <= ub_match};
875✔
261
            }
262
        }
3,962✔
263
    }
6,024✔
264
    return {};
1,296✔
265
}
8,359✔
266

267
// Given left and right values, get the left and right intervals, and also split
268
// the left interval into separate negative and positive intervals.
269
static void get_signed_intervals(const NumAbsDomain& inv, bool is64, const variable_t left_svalue,
15,536✔
270
                                 const variable_t left_uvalue, const linear_expression_t& right_svalue,
271
                                 interval_t& left_interval, interval_t& right_interval,
272
                                 interval_t& left_interval_positive, interval_t& left_interval_negative) {
273
    using crab::interval_t;
7,768✔
274
    using namespace crab::dsl_syntax;
7,768✔
275

276
    // Get intervals as 32-bit or 64-bit as appropriate.
277
    left_interval = inv.eval_interval(left_svalue);
23,304✔
278
    right_interval = inv.eval_interval(right_svalue);
15,536✔
279
    if (!is64) {
15,536✔
280
        if ((left_interval <= interval_t::nonnegative(32) && right_interval <= interval_t::nonnegative(32)) ||
16,880✔
281
            (left_interval <= interval_t::negative(32) && right_interval <= interval_t::negative(32))) {
10,274✔
282
            is64 = true;
8,567✔
283
            // fallthrough as 64bit, including deduction of relational information
284
        } else {
285
            left_interval = left_interval.truncate_to<int32_t>();
3,448✔
286
            right_interval = right_interval.truncate_to<int32_t>();
5,172✔
287
            // continue as 32bit
288
        }
289
    }
290

291
    if (!left_interval.is_top()) {
15,536✔
292
        left_interval_positive = left_interval & interval_t::nonnegative(64);
20,652✔
293
        left_interval_negative = left_interval & interval_t::negative(64);
27,536✔
294
    } else {
295
        left_interval = inv.eval_interval(left_uvalue);
2,652✔
296
        if (!left_interval.is_top()) {
1,768✔
297
            // The interval is TOP as a signed interval but is represented precisely as an unsigned interval,
298
            // so split into two signed intervals that can be treated separately.
299
            left_interval_positive = left_interval & interval_t::nonnegative(64);
318✔
300
            const number_t lih_ub =
106✔
301
                left_interval.ub().number() ? left_interval.ub().number()->truncate_to<int64_t>() : -1;
568✔
302
            left_interval_negative = interval_t{std::numeric_limits<int64_t>::min(), lih_ub};
424✔
303
        } else {
212✔
304
            left_interval_positive = interval_t::nonnegative(64);
1,556✔
305
            left_interval_negative = interval_t::negative(64);
2,334✔
306
        }
307
    }
308

309
    left_interval = left_interval.truncate_to<int64_t>();
15,536✔
310
    right_interval = right_interval.truncate_to<int64_t>();
15,536✔
311
}
15,536✔
312

313
// Given left and right values, get the left and right intervals, and also split
314
// the left interval into separate low and high intervals.
315
static void get_unsigned_intervals(const NumAbsDomain& inv, bool is64, const variable_t left_svalue,
15,634✔
316
                                   const variable_t left_uvalue, const linear_expression_t& right_uvalue,
317
                                   interval_t& left_interval, interval_t& right_interval, interval_t& left_interval_low,
318
                                   interval_t& left_interval_high) {
319
    using crab::interval_t;
7,817✔
320
    using namespace crab::dsl_syntax;
7,817✔
321

322
    // Get intervals as 32-bit or 64-bit as appropriate.
323
    left_interval = inv.eval_interval(left_uvalue);
23,451✔
324
    right_interval = inv.eval_interval(right_uvalue);
15,634✔
325
    if (!is64) {
15,634✔
326
        if ((left_interval <= interval_t::nonnegative(32) && right_interval <= interval_t::nonnegative(32)) ||
15,451✔
327
            (left_interval <= interval_t::unsigned_high(32) && right_interval <= interval_t::unsigned_high(32))) {
9,038✔
328
            is64 = true;
8,692✔
329
            // fallthrough as 64bit, including deduction of relational information
330
        } else {
331
            left_interval = left_interval.truncate_to<uint32_t>();
2,914✔
332
            right_interval = right_interval.truncate_to<uint32_t>();
4,371✔
333
            // continue as 32bit
334
        }
335
    }
336

337
    if (!left_interval.is_top()) {
15,634✔
338
        left_interval_low = left_interval & interval_t::nonnegative(64);
20,358✔
339
        left_interval_high = left_interval & interval_t::unsigned_high(64);
27,144✔
340
    } else {
341
        left_interval = inv.eval_interval(left_svalue);
3,093✔
342
        if (!left_interval.is_top()) {
2,062✔
343
            // The interval is TOP as an unsigned interval but is represented precisely as a signed interval,
344
            // so split into two unsigned intervals that can be treated separately.
345
            left_interval_low = interval_t(0, left_interval.ub()).truncate_to<uint64_t>();
272✔
346
            left_interval_high = interval_t(left_interval.lb(), -1).truncate_to<uint64_t>();
340✔
347
        } else {
348
            left_interval_low = interval_t::nonnegative(64);
1,926✔
349
            left_interval_high = interval_t::unsigned_high(64);
2,889✔
350
        }
351
    }
352

353
    left_interval = left_interval.truncate_to<uint64_t>();
15,634✔
354
    right_interval = right_interval.truncate_to<uint64_t>();
15,634✔
355
}
15,634✔
356

357
static std::vector<linear_constraint_t>
358
assume_signed_64bit_lt(const bool strict, const variable_t left_svalue, const variable_t left_uvalue,
756✔
359
                       const interval_t& left_interval_positive, const interval_t& left_interval_negative,
360
                       const linear_expression_t& right_svalue, const linear_expression_t& right_uvalue,
361
                       const interval_t& right_interval) {
362
    using crab::interval_t;
378✔
363
    using namespace crab::dsl_syntax;
378✔
364

365
    if (right_interval <= interval_t::negative(64)) {
1,134✔
366
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MIN, -1].
367
        return {strict ? left_svalue < right_svalue : left_svalue <= right_svalue, 0 <= left_uvalue,
102✔
368
                strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
510✔
369
    } else if ((left_interval_negative | left_interval_positive) <= interval_t::nonnegative(64) &&
1,683✔
370
               right_interval <= interval_t::nonnegative(64)) {
750✔
371
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX].
372
        return {strict ? left_svalue < right_svalue : left_svalue <= right_svalue, 0 <= left_uvalue,
96✔
373
                strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
480✔
374
    } else {
375
        // Interval can only be represented as an svalue.
376
        return {strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
1,674✔
377
    }
378
}
1,710✔
379

380
static std::vector<linear_constraint_t>
381
assume_signed_32bit_lt(const NumAbsDomain& inv, const bool strict, const variable_t left_svalue,
376✔
382
                       const variable_t left_uvalue, const interval_t& left_interval_positive,
383
                       const interval_t& left_interval_negative, const linear_expression_t& right_svalue,
384
                       const linear_expression_t& right_uvalue, const interval_t& right_interval) {
385
    using crab::interval_t;
188✔
386
    using namespace crab::dsl_syntax;
188✔
387

388
    if (right_interval <= interval_t::negative(32)) {
564✔
389
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MIN, -1],
390
        // aka [INT_MAX+1, UINT_MAX].
391
        return {std::numeric_limits<int32_t>::max() < left_uvalue,
17✔
392
                strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
34✔
393
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
170✔
394
    } else if ((left_interval_negative | left_interval_positive) <= interval_t::nonnegative(32) &&
870✔
395
               right_interval <= interval_t::nonnegative(32)) {
372✔
396
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX]
397
        const auto lpub = left_interval_positive.truncate_to<int32_t>().ub();
45✔
398
        return {left_svalue >= 0,
15✔
399
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue,
30✔
400
                left_svalue <= left_uvalue,
401
                left_svalue >= left_uvalue,
402
                left_uvalue >= 0,
403
                strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
30✔
404
                left_uvalue <= *lpub.number()};
285✔
405
    } else if (inv.eval_interval(left_svalue) <= interval_t::signed_int(32) &&
933✔
406
               inv.eval_interval(right_svalue) <= interval_t::signed_int(32)) {
726✔
407
        // Interval can only be represented as an svalue.
408
        return {strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
828✔
409
    } else {
410
        // We can't directly compare the svalues since they may differ in high order bits.
411
        return {};
36✔
412
    }
413
}
806✔
414

415
static std::vector<linear_constraint_t>
416
assume_signed_64bit_gt(const bool strict, const variable_t left_svalue, const variable_t left_uvalue,
772✔
417
                       const interval_t& left_interval_positive, const interval_t& left_interval_negative,
418
                       const linear_expression_t& right_svalue, const linear_expression_t& right_uvalue,
419
                       const interval_t& right_interval) {
420
    using crab::interval_t;
386✔
421
    using namespace crab::dsl_syntax;
386✔
422

423
    if (right_interval <= interval_t::nonnegative(64)) {
1,158✔
424
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX].
425
        const auto lpub = left_interval_positive.truncate_to<int64_t>().ub();
984✔
426
        return {left_svalue >= 0,
328✔
427
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue,
656✔
428
                left_svalue <= left_uvalue,
429
                left_svalue >= left_uvalue,
430
                left_uvalue >= 0,
431
                strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
656✔
432
                left_uvalue <= *lpub.number()};
6,232✔
433
    } else if ((left_interval_negative | left_interval_positive) <= interval_t::negative(64) &&
619✔
434
               right_interval <= interval_t::negative(64)) {
118✔
435
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MIN, -1],
436
        // aka [INT_MAX+1, UINT_MAX].
437
        return {std::numeric_limits<int64_t>::max() < left_uvalue,
1✔
438
                strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
2✔
439
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
9✔
440
    } else {
441
        // Interval can only be represented as an svalue.
442
        return {strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
342✔
443
    }
444
}
3,185✔
445

446
static std::vector<linear_constraint_t>
447
assume_signed_32bit_gt(const NumAbsDomain& inv, const bool strict, const variable_t left_svalue,
378✔
448
                       const variable_t left_uvalue, const interval_t& left_interval_positive,
449
                       const interval_t& left_interval_negative, const linear_expression_t& right_svalue,
450
                       const linear_expression_t& right_uvalue, const interval_t& right_interval) {
451
    using crab::interval_t;
189✔
452
    using namespace crab::dsl_syntax;
189✔
453

454
    if (right_interval <= interval_t::nonnegative(32)) {
567✔
455
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX].
456
        const auto lpub = left_interval_positive.truncate_to<int32_t>().ub();
513✔
457
        return {left_svalue >= 0,
171✔
458
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue,
342✔
459
                left_svalue <= left_uvalue,
460
                left_svalue >= left_uvalue,
461
                left_uvalue >= 0,
462
                strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
342✔
463
                left_uvalue <= *lpub.number()};
3,249✔
464
    } else if ((left_interval_negative | left_interval_positive) <= interval_t::negative(32) &&
262✔
465
               right_interval <= interval_t::negative(32)) {
38✔
466
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MIN, -1],
467
        // aka [INT_MAX+1, UINT_MAX].
468
        return {left_uvalue >= number_t{std::numeric_limits<int32_t>::max()} + 1,
4✔
469
                strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
2✔
470
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
9✔
471
    } else if (inv.eval_interval(left_svalue) <= interval_t::signed_int(32) &&
92✔
472
               inv.eval_interval(right_svalue) <= interval_t::signed_int(32)) {
55✔
473
        // Interval can only be represented as an svalue.
474
        return {strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
42✔
475
    } else {
476
        // We can't directly compare the svalues since they may differ in high order bits.
477
        return {};
20✔
478
    }
479
}
1,575✔
480

481
static std::vector<linear_constraint_t> assume_signed_cst_interval(const NumAbsDomain& inv, Condition::Op op, bool is64,
14,940✔
482
                                                                   variable_t left_svalue, variable_t left_uvalue,
483
                                                                   const linear_expression_t& right_svalue,
484
                                                                   const linear_expression_t& right_uvalue) {
485
    using crab::interval_t;
7,470✔
486
    using namespace crab::dsl_syntax;
7,470✔
487

488
    interval_t left_interval = interval_t::bottom();
14,940✔
489
    interval_t right_interval = interval_t::bottom();
14,940✔
490
    interval_t left_interval_positive = interval_t::bottom();
14,940✔
491
    interval_t left_interval_negative = interval_t::bottom();
14,940✔
492
    get_signed_intervals(inv, is64, left_svalue, left_uvalue, right_svalue, left_interval, right_interval,
14,940✔
493
                         left_interval_positive, left_interval_negative);
494

495
    if (op == Condition::Op::EQ) {
14,940✔
496
        // Handle svalue == right.
497
        if (is64) {
12,474✔
498
            return assume_signed_64bit_eq(inv, left_svalue, left_uvalue, right_interval, right_svalue, right_uvalue);
8,350✔
499
        } else {
500
            return assume_signed_32bit_eq(inv, left_svalue, left_uvalue, right_interval);
4,124✔
501
        }
502
    }
503

504
    const bool is_lt = op == Condition::Op::SLT || op == Condition::Op::SLE;
2,466✔
505
    bool strict = op == Condition::Op::SLT || op == Condition::Op::SGT;
2,466✔
506

507
    auto llb = left_interval.lb();
2,466✔
508
    auto lub = left_interval.ub();
2,466✔
509
    auto rlb = right_interval.lb();
2,466✔
510
    auto rub = right_interval.ub();
2,466✔
511
    if (!is_lt && (strict ? lub <= rlb : lub < rlb)) {
2,889✔
512
        // Left signed interval is lower than right signed interval.
513
        return {linear_constraint_t::false_const()};
125✔
514
    } else if (is_lt && (strict ? llb >= rub : llb > rub)) {
2,597✔
515
        // Left signed interval is higher than right signed interval.
516
        return {linear_constraint_t::false_const()};
95✔
517
    }
518
    if (is_lt && (strict ? lub < rlb : lub <= rlb)) {
2,798✔
519
        // Left signed interval is lower than right signed interval.
520
        return {linear_constraint_t::true_const()};
145✔
521
    } else if (!is_lt && (strict ? llb > rub : llb >= rub)) {
2,501✔
522
        // Left signed interval is higher than right signed interval.
523
        return {linear_constraint_t::true_const()};
95✔
524
    }
525

526
    if (is64) {
2,282✔
527
        if (is_lt) {
1,528✔
528
            return assume_signed_64bit_lt(strict, left_svalue, left_uvalue, left_interval_positive,
378✔
529
                                          left_interval_negative, right_svalue, right_uvalue, right_interval);
756✔
530
        } else {
531
            return assume_signed_64bit_gt(strict, left_svalue, left_uvalue, left_interval_positive,
386✔
532
                                          left_interval_negative, right_svalue, right_uvalue, right_interval);
772✔
533
        }
534
    } else {
535
        // 32-bit compare.
536
        if (is_lt) {
754✔
537
            return assume_signed_32bit_lt(inv, strict, left_svalue, left_uvalue, left_interval_positive,
188✔
538
                                          left_interval_negative, right_svalue, right_uvalue, right_interval);
376✔
539
        } else {
540
            return assume_signed_32bit_gt(inv, strict, left_svalue, left_uvalue, left_interval_positive,
189✔
541
                                          left_interval_negative, right_svalue, right_uvalue, right_interval);
378✔
542
        }
543
    }
544
    return {};
545
}
38,767✔
546

547
static std::vector<linear_constraint_t>
548
assume_unsigned_64bit_lt(const NumAbsDomain& inv, bool strict, variable_t left_svalue, variable_t left_uvalue,
1,364✔
549
                         const interval_t& left_interval_low, const interval_t& left_interval_high,
550
                         const linear_expression_t& right_svalue, const linear_expression_t& right_uvalue,
551
                         const interval_t& right_interval) {
552
    using crab::interval_t;
682✔
553
    using namespace crab::dsl_syntax;
682✔
554

555
    auto rub = right_interval.ub();
1,364✔
556
    auto lllb = left_interval_low.truncate_to<uint64_t>().lb();
2,046✔
557
    if (right_interval <= interval_t::nonnegative(64) && (strict ? lllb >= rub : lllb > rub)) {
2,348✔
558
        // The high interval is out of range.
559
        if (auto lsubn = inv.eval_interval(left_svalue).ub().number()) {
×
560
            return {left_uvalue >= 0, (strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue),
×
561
                    left_uvalue <= *lsubn, left_svalue >= 0};
×
562
        } else {
563
            return {left_uvalue >= 0, (strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue),
×
564
                    left_svalue >= 0};
×
565
        }
×
566
    }
567
    auto lhlb = left_interval_high.truncate_to<uint64_t>().lb();
2,046✔
568
    if (right_interval <= interval_t::unsigned_high(64) && (strict ? lhlb >= rub : lhlb > rub)) {
2,047✔
569
        // The high interval is out of range.
570
        if (auto lsubn = inv.eval_interval(left_svalue).ub().number()) {
10✔
571
            return {left_uvalue >= 0, (strict ? left_uvalue < *rub.number() : left_uvalue <= *rub.number()),
6✔
572
                    left_uvalue <= *lsubn, left_svalue >= 0};
24✔
573
        } else {
574
            return {left_uvalue >= 0, (strict ? left_uvalue < *rub.number() : left_uvalue <= *rub.number()),
×
575
                    left_svalue >= 0};
×
576
        }
4✔
577
    }
578
    if (right_interval <= interval_t::signed_int(64)) {
2,040✔
579
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX].
580
        auto llub = left_interval_low.truncate_to<uint64_t>().ub();
1,677✔
581
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
1,118✔
582
                left_uvalue <= *llub.number(), 0 <= left_svalue,
1,118✔
583
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
7,826✔
584
    } else if (left_interval_low.is_bottom() && right_interval <= interval_t::unsigned_high(64)) {
803✔
585
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MAX+1, UINT_MAX].
586
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
2✔
587
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
10✔
588
    } else if ((left_interval_low | left_interval_high) == interval_t::unsigned_int(64)) {
480✔
589
        // Interval can only be represented as a uvalue, and was TOP before.
590
        return {strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
48✔
591
    } else {
592
        // Interval can only be represented as a uvalue.
593
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
896✔
594
    }
595
}
8,250✔
596

597
static std::vector<linear_constraint_t> assume_unsigned_32bit_lt(const NumAbsDomain& inv, const bool strict,
340✔
598
                                                                 const variable_t left_svalue,
599
                                                                 const variable_t left_uvalue,
600
                                                                 const linear_expression_t& right_svalue,
601
                                                                 const linear_expression_t& right_uvalue) {
602
    using crab::interval_t;
170✔
603
    using namespace crab::dsl_syntax;
170✔
604

605
    if (inv.eval_interval(left_uvalue) <= interval_t::nonnegative(32) &&
984✔
606
        inv.eval_interval(right_uvalue) <= interval_t::nonnegative(32)) {
742✔
607
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT32_MAX].
608
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
266✔
609
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
1,330✔
610
    } else if (inv.eval_interval(left_svalue) <= interval_t::negative(32) &&
186✔
611
               inv.eval_interval(right_svalue) <= interval_t::negative(32)) {
77✔
612
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT32_MIN, -1].
613
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue,
2✔
614
                strict ? left_svalue < right_svalue : left_svalue <= right_svalue};
10✔
615
    } else if (inv.eval_interval(left_uvalue) <= interval_t::unsigned_int(32) &&
214✔
616
               inv.eval_interval(right_uvalue) <= interval_t::unsigned_int(32)) {
174✔
617
        // Interval can only be represented as a uvalue.
618
        return {0 <= left_uvalue, strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
256✔
619
    } else {
620
        // We can't directly compare the uvalues since they may differ in high order bits.
621
        return {};
8✔
622
    }
623
}
1,130✔
624

625
static std::vector<linear_constraint_t>
626
assume_unsigned_64bit_gt(const NumAbsDomain& inv, const bool strict, const variable_t left_svalue,
6,370✔
627
                         const variable_t left_uvalue, const interval_t& left_interval_low,
628
                         const interval_t& left_interval_high, const linear_expression_t& right_svalue,
629
                         const linear_expression_t& right_uvalue, const interval_t& right_interval) {
630
    using crab::interval_t;
3,185✔
631
    using namespace crab::dsl_syntax;
3,185✔
632

633
    const auto rlb = right_interval.lb();
6,370✔
634
    const auto llub = left_interval_low.truncate_to<uint64_t>().ub();
9,555✔
635
    const auto lhlb = left_interval_high.truncate_to<uint64_t>().lb();
9,555✔
636

637
    if (right_interval <= interval_t::nonnegative(64) && (strict ? llub <= rlb : llub < rlb)) {
9,685✔
638
        // The low interval is out of range.
639
        return {strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
16✔
640
                *lhlb.number() == std::numeric_limits<uint64_t>::max() ? left_uvalue == *lhlb.number()
46✔
641
                                                                       : left_uvalue >= *lhlb.number(),
6✔
642
                left_svalue < 0};
72✔
643
    } else if (right_interval <= interval_t::unsigned_high(64)) {
9,531✔
644
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MAX+1, UINT_MAX].
645
        return {0 <= left_uvalue, strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
4✔
646
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
18✔
647
    } else if ((left_interval_low | left_interval_high) <= interval_t::nonnegative(64) &&
18,601✔
648
               right_interval <= interval_t::nonnegative(64)) {
11,802✔
649
        // Interval can be represented as both an svalue and a uvalue since it fits in [0, INT_MAX].
650
        return {0 <= left_uvalue, strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
5,230✔
651
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
23,535✔
652
    } else {
653
        // Interval can only be represented as a uvalue.
654
        return {0 <= left_uvalue, strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue};
4,480✔
655
    }
656
}
22,331✔
657

658
static std::vector<linear_constraint_t>
659
assume_unsigned_32bit_gt(const NumAbsDomain& inv, const bool strict, const variable_t left_svalue,
3,202✔
660
                         const variable_t left_uvalue, const interval_t& left_interval_low,
661
                         const interval_t& left_interval_high, const linear_expression_t& right_svalue,
662
                         const linear_expression_t& right_uvalue, const interval_t& right_interval) {
663
    using crab::interval_t;
1,601✔
664
    using namespace crab::dsl_syntax;
1,601✔
665

666
    if (right_interval <= interval_t::unsigned_high(32)) {
4,803✔
667
        // Interval can be represented as both an svalue and a uvalue since it fits in [INT_MAX+1, UINT_MAX].
668
        return {0 <= left_uvalue, strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue,
4✔
669
                strict ? left_svalue > right_svalue : left_svalue >= right_svalue};
18✔
670
    } else if (inv.eval_interval(left_uvalue) <= interval_t::unsigned_int(32) &&
8,961✔
671
               inv.eval_interval(right_uvalue) <= interval_t::unsigned_int(32)) {
6,096✔
672
        // Interval can only be represented as a uvalue.
673
        return {0 <= left_uvalue, strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue};
7,712✔
674
    } else {
675
        // We can't directly compare the uvalues since they may differ in high order bits.
676
        return {};
1,270✔
677
    }
678
}
5,796✔
679

680
static std::vector<linear_constraint_t> assume_unsigned_cst_interval(const NumAbsDomain& inv, Condition::Op op,
15,634✔
681
                                                                     bool is64, variable_t left_svalue,
682
                                                                     variable_t left_uvalue,
683
                                                                     const linear_expression_t& right_svalue,
684
                                                                     const linear_expression_t& right_uvalue) {
685
    using crab::interval_t;
7,817✔
686
    using namespace crab::dsl_syntax;
7,817✔
687

688
    interval_t left_interval = interval_t::bottom();
15,634✔
689
    interval_t right_interval = interval_t::bottom();
15,634✔
690
    interval_t left_interval_low = interval_t::bottom();
15,634✔
691
    interval_t left_interval_high = interval_t::bottom();
15,634✔
692
    get_unsigned_intervals(inv, is64, left_svalue, left_uvalue, right_uvalue, left_interval, right_interval,
15,634✔
693
                           left_interval_low, left_interval_high);
694

695
    // Handle uvalue != right.
696
    if (op == Condition::Op::NE) {
15,634✔
697
        if (auto rn = right_interval.singleton()) {
12,512✔
698
            if (rn == left_interval.truncate_to_uint(is64 ? 64 : 32).lb().number()) {
32,853✔
699
                // "NE lower bound" is equivalent to "GT lower bound".
700
                op = Condition::Op::GT;
8,484✔
701
                right_interval = interval_t{left_interval.lb()};
16,968✔
702
            } else if (rn == left_interval.ub().number()) {
7,732✔
703
                // "NE upper bound" is equivalent to "LT upper bound".
704
                op = Condition::Op::LT;
402✔
705
                right_interval = interval_t{left_interval.ub()};
804✔
706
            } else {
707
                return {};
3,464✔
708
            }
709
        } else {
710
            return {};
162✔
711
        }
12,512✔
712
    }
713

714
    const bool is_lt = op == Condition::Op::LT || op == Condition::Op::LE;
12,008✔
715
    bool strict = op == Condition::Op::LT || op == Condition::Op::GT;
12,008✔
716

717
    auto [llb, lub] = left_interval.pair();
18,012✔
718
    auto [rlb, rub] = right_interval.pair();
18,012✔
719
    if (is_lt ? (strict ? llb >= rub : llb > rub) : (strict ? lub <= rlb : lub < rlb)) {
12,725✔
720
        // The left unsigned interval lies entirely on the wrong side of the right one, so the comparison is false.
721
        return {linear_constraint_t::false_const()};
1,210✔
722
    }
723
    if (is_lt && (strict ? lub < rlb : lub <= rlb)) {
11,938✔
724
        // Left unsigned interval is lower than right unsigned interval. We still add a
725
        // relationship for use when widening, such as is used in the prime conformance test.
726
        if (is64) {
114✔
727
            return {strict ? left_uvalue < right_uvalue : left_uvalue <= right_uvalue};
246✔
728
        }
729
        return {};
32✔
730
    } else if (!is_lt && (strict ? llb > rub : llb >= rub)) {
16,070✔
731
        // Left unsigned interval is higher than right unsigned interval. We still add a
732
        // relationship for use when widening, such as is used in the prime conformance test.
733
        if (is64) {
134✔
734
            return {strict ? left_uvalue > right_uvalue : left_uvalue >= right_uvalue};
276✔
735
        } else {
736
            return {};
42✔
737
        }
738
    }
739

740
    if (is64) {
11,276✔
741
        if (is_lt) {
7,734✔
742
            return assume_unsigned_64bit_lt(inv, strict, left_svalue, left_uvalue, left_interval_low,
682✔
743
                                            left_interval_high, right_svalue, right_uvalue, right_interval);
1,364✔
744
        } else {
745
            return assume_unsigned_64bit_gt(inv, strict, left_svalue, left_uvalue, left_interval_low,
3,185✔
746
                                            left_interval_high, right_svalue, right_uvalue, right_interval);
6,370✔
747
        }
748
    } else {
749
        if (is_lt) {
3,542✔
750
            return assume_unsigned_32bit_lt(inv, strict, left_svalue, left_uvalue, right_svalue, right_uvalue);
340✔
751
        } else {
752
            return assume_unsigned_32bit_gt(inv, strict, left_svalue, left_uvalue, left_interval_low,
1,601✔
753
                                            left_interval_high, right_svalue, right_uvalue, right_interval);
3,202✔
754
        }
755
    }
756
}
51,838✔
757

758
/** Linear constraints for a comparison with a constant.
759
 */
760
static std::vector<linear_constraint_t> assume_cst_imm(const NumAbsDomain& inv, const Condition::Op op, const bool is64,
28,798✔
761
                                                       const variable_t dst_svalue, const variable_t dst_uvalue,
762
                                                       const int64_t imm) {
763
    using namespace crab::dsl_syntax;
14,399✔
764
    using Op = Condition::Op;
14,399✔
765
    switch (op) {
28,798✔
766
    case Op::EQ:
14,696✔
767
    case Op::SGE:
7,348✔
768
    case Op::SLE:
7,348✔
769
    case Op::SGT:
7,348✔
770
    case Op::SLT:
7,348✔
771
        return assume_signed_cst_interval(inv, op, is64, dst_svalue, dst_uvalue, imm, gsl::narrow_cast<uint64_t>(imm));
14,696✔
772
    case Op::SET:
28✔
773
    case Op::NSET: return assume_bit_cst_interval(inv, op, is64, dst_uvalue, interval_t{imm});
42✔
774
    case Op::NE:
14,074✔
775
    case Op::GE:
7,037✔
776
    case Op::LE:
7,037✔
777
    case Op::GT:
7,037✔
778
    case Op::LT:
7,037✔
779
        return assume_unsigned_cst_interval(inv, op, is64, dst_svalue, dst_uvalue, imm,
7,037✔
780
                                            gsl::narrow_cast<uint64_t>(imm));
14,074✔
781
    }
782
    return {};
×
783
}
784

785
/** Linear constraint for a numerical comparison between registers.
786
 */
787
static std::vector<linear_constraint_t> assume_cst_reg(const NumAbsDomain& inv, const Condition::Op op, const bool is64,
6,436✔
788
                                                       const variable_t dst_svalue, const variable_t dst_uvalue,
789
                                                       const variable_t src_svalue, const variable_t src_uvalue) {
790
    using namespace crab::dsl_syntax;
3,218✔
791
    using Op = Condition::Op;
3,218✔
792
    if (is64) {
6,436✔
793
        switch (op) {
5,804✔
794
        case Op::EQ: {
866✔
795
            const interval_t src_interval = inv.eval_interval(src_svalue);
866✔
796
            if (!src_interval.is_singleton() && src_interval <= interval_t::nonnegative(64)) {
1,316✔
797
                return {eq(dst_svalue, src_svalue), eq(dst_uvalue, src_uvalue), eq(dst_svalue, dst_uvalue)};
558✔
798
            } else {
799
                return {eq(dst_svalue, src_svalue), eq(dst_uvalue, src_uvalue)};
2,597✔
800
            }
801
        }
866✔
802
        case Op::NE: return {neq(dst_svalue, src_svalue)};
2,175✔
803
        case Op::SGE: return {dst_svalue >= src_svalue};
35✔
804
        case Op::SLE: return {dst_svalue <= src_svalue};
3,555✔
805
        case Op::SGT: return {dst_svalue > src_svalue};
3,555✔
806
        // Note: reverse the test as a workaround strange lookup:
807
        case Op::SLT: return {src_svalue > dst_svalue};
35✔
808
        case Op::SET:
12✔
809
        case Op::NSET: return assume_bit_cst_interval(inv, op, is64, dst_uvalue, inv.eval_interval(src_uvalue));
18✔
810
        case Op::GE:
1,184✔
811
        case Op::LE:
592✔
812
        case Op::GT:
592✔
813
        case Op::LT: return assume_unsigned_cst_interval(inv, op, is64, dst_svalue, dst_uvalue, src_svalue, src_uvalue);
1,184✔
814
        }
815
    } else {
816
        switch (op) {
632✔
817
        case Op::EQ:
244✔
818
        case Op::SGE:
122✔
819
        case Op::SLE:
122✔
820
        case Op::SGT:
122✔
821
        case Op::SLT: return assume_signed_cst_interval(inv, op, is64, dst_svalue, dst_uvalue, src_svalue, src_uvalue);
244✔
822
        case Op::SET:
12✔
823
        case Op::NSET: return assume_bit_cst_interval(inv, op, is64, dst_uvalue, inv.eval_interval(src_uvalue));
18✔
824
        case Op::NE:
376✔
825
        case Op::GE:
188✔
826
        case Op::LE:
188✔
827
        case Op::GT:
188✔
828
        case Op::LT: return assume_unsigned_cst_interval(inv, op, is64, dst_svalue, dst_uvalue, src_svalue, src_uvalue);
376✔
829
        }
830
    }
831
    assert(false);
×
832
    throw std::exception();
833
}
8,205✔
834

835
void ebpf_transformer::assign(const variable_t x, const linear_expression_t& e) { m_inv.assign(x, e); }
×
836
void ebpf_transformer::assign(const variable_t x, const int64_t e) { m_inv.set(x, interval_t(e)); }
116,145✔
837

838
void ebpf_transformer::apply(const arith_binop_t op, const variable_t x, const variable_t y, const number_t& z,
×
839
                             const int finite_width) {
840
    m_inv.apply(op, x, y, z, finite_width);
×
841
}
×
842

843
void ebpf_transformer::apply(const arith_binop_t op, const variable_t x, const variable_t y, const variable_t z,
×
844
                             const int finite_width) {
845
    m_inv.apply(op, x, y, z, finite_width);
×
846
}
×
847

848
void ebpf_transformer::apply(const bitwise_binop_t op, const variable_t x, const variable_t y, const variable_t z,
×
849
                             const int finite_width) {
850
    m_inv.apply(op, x, y, z, finite_width);
×
851
}
×
852

853
void ebpf_transformer::apply(const bitwise_binop_t op, const variable_t x, const variable_t y, const number_t& k,
×
854
                             const int finite_width) {
855
    m_inv.apply(op, x, y, k, finite_width);
×
856
}
×
857

858
void ebpf_transformer::apply(binop_t op, variable_t x, variable_t y, const number_t& z, int finite_width) {
×
859
    std::visit([&](auto top) { apply(top, x, y, z, finite_width); }, op);
×
860
}
×
861

862
void ebpf_transformer::apply(binop_t op, variable_t x, variable_t y, variable_t z, int finite_width) {
×
863
    std::visit([&](auto top) { apply(top, x, y, z, finite_width); }, op);
×
864
}
×
865

866
static void havoc_offsets(NumAbsDomain& inv, const Reg& reg) {
188,944✔
867
    const reg_pack_t r = reg_pack(reg);
188,944✔
868
    inv -= r.ctx_offset;
188,944✔
869
    inv -= r.map_fd;
188,944✔
870
    inv -= r.packet_offset;
188,944✔
871
    inv -= r.shared_offset;
188,944✔
872
    inv -= r.shared_region_size;
188,944✔
873
    inv -= r.stack_offset;
188,944✔
874
    inv -= r.stack_numeric_size;
188,944✔
875
}
188,944✔
876
static void havoc_register(NumAbsDomain& inv, const Reg& reg) {
91,090✔
877
    const reg_pack_t r = reg_pack(reg);
91,090✔
878
    havoc_offsets(inv, reg);
91,090✔
879
    inv -= r.svalue;
91,090✔
880
    inv -= r.uvalue;
91,090✔
881
}
91,090✔
882

883
void ebpf_transformer::scratch_caller_saved_registers() {
13,730✔
884
    for (int i = R1_ARG; i <= R5_ARG; i++) {
82,380✔
885
        Reg r{gsl::narrow<uint8_t>(i)};
68,650✔
886
        havoc_register(m_inv, r);
68,650✔
887
        type_inv.havoc_type(m_inv, r);
68,650✔
888
    }
889
}
13,730✔
890

891
void ebpf_transformer::save_callee_saved_registers(const std::string& prefix) {
58✔
892
    // Create variables specific to the new call stack frame that store
893
    // copies of the states of r6 through r9.
894
    for (int r = R6; r <= R9; r++) {
290✔
895
        for (const data_kind_t kind : iterate_kinds()) {
2,552✔
896
            const variable_t src_var = variable_t::reg(kind, r);
2,320✔
897
            if (!m_inv[src_var].is_top()) {
4,640✔
898
                assign(variable_t::stack_frame_var(kind, r, prefix), src_var);
100✔
899
            }
900
        }
232✔
901
    }
902
}
58✔
903

904
void ebpf_transformer::restore_callee_saved_registers(const std::string& prefix) {
58✔
905
    for (int r = R6; r <= R9; r++) {
290✔
906
        for (const data_kind_t kind : iterate_kinds()) {
2,552✔
907
            const variable_t src_var = variable_t::stack_frame_var(kind, r, prefix);
2,320✔
908
            if (!m_inv[src_var].is_top()) {
4,640✔
909
                assign(variable_t::reg(kind, r), src_var);
100✔
910
            } else {
911
                havoc(variable_t::reg(kind, r));
2,220✔
912
            }
913
            havoc(src_var);
2,320✔
914
        }
232✔
915
    }
916
}
58✔
917

918
void ebpf_transformer::havoc_subprogram_stack(const std::string& prefix) {
58✔
919
    const variable_t r10_stack_offset = reg_pack(R10_STACK_POINTER).stack_offset;
58✔
920
    const auto intv = m_inv.eval_interval(r10_stack_offset);
58✔
921
    if (!intv.is_singleton()) {
58✔
922
        return;
36✔
923
    }
924
    const int64_t stack_start = intv.singleton()->cast_to<int64_t>() - EBPF_SUBPROGRAM_STACK_SIZE;
22✔
925
    for (const data_kind_t kind : iterate_kinds()) {
242✔
926
        stack.havoc(m_inv, kind, stack_start, EBPF_SUBPROGRAM_STACK_SIZE);
220✔
927
    }
22✔
928
}
58✔
929

930
void ebpf_transformer::forget_packet_pointers() {
38✔
931
    using namespace crab::dsl_syntax;
19✔
932

933
    for (const variable_t type_variable : variable_t::get_type_variables()) {
1,258✔
934
        if (type_inv.has_type(m_inv, type_variable, T_PACKET)) {
1,220✔
935
            havoc(variable_t::kind_var(data_kind_t::types, type_variable));
720✔
936
            havoc(variable_t::kind_var(data_kind_t::packet_offsets, type_variable));
720✔
937
            havoc(variable_t::kind_var(data_kind_t::svalues, type_variable));
720✔
938
            havoc(variable_t::kind_var(data_kind_t::uvalues, type_variable));
1,330✔
939
        }
940
    }
38✔
941

942
    dom.initialize_packet();
38✔
943
}
38✔
944

945
static void overflow_bounds(NumAbsDomain& inv, variable_t lhs, number_t span, int finite_width, bool issigned) {
187,076✔
946
    using namespace crab::dsl_syntax;
93,538✔
947
    auto interval = inv[lhs];
187,076✔
948
    if (interval.ub() - interval.lb() >= span) {
467,690✔
949
        // Interval covers the full space.
950
        inv -= lhs;
19,376✔
951
        return;
9,688✔
952
    }
953
    if (interval.is_bottom()) {
167,700✔
954
        inv -= lhs;
88✔
955
        return;
44✔
956
    }
957
    number_t lb_value = interval.lb().number().value();
335,224✔
958
    number_t ub_value = interval.ub().number().value();
335,224✔
959

960
    // Compute the interval, taking overflow into account.
961
    // For a signed result, we need to ensure the signed and unsigned results match
962
    // so for a 32-bit operation 0x80000000 should be treated as a positive 64-bit number,
963
    // not a sign-extended negative one.
964
    number_t lb = lb_value.truncate_to_uint(finite_width);
167,612✔
965
    number_t ub = ub_value.truncate_to_uint(finite_width);
167,612✔
966
    if (issigned) {
167,612✔
967
        lb = lb.truncate_to<int64_t>();
67,824✔
968
        ub = ub.truncate_to<int64_t>();
67,824✔
969
    }
970
    if (lb > ub) {
167,612✔
971
        // Range wraps in the middle, so we cannot represent as an unsigned interval.
972
        inv -= lhs;
3,438✔
973
        return;
3,438✔
974
    }
975
    auto new_interval = interval_t{lb, ub};
410,435✔
976
    if (new_interval != interval) {
164,174✔
977
        // Update the variable, which will lose any relationships to other variables.
978
        inv.set(lhs, new_interval);
1,352✔
979
    }
980
}
277,758✔
981
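
// Illustrative sketch (not part of the original source; the helper name and the use of plain
// integers are hypothetical, standard headers <cstdint>, <optional> and <utility> assumed):
// the truncate-and-compare check that overflow_bounds() performs. Both bounds are wrapped to
// the finite width; if the wrapped lower bound still does not exceed the wrapped upper bound,
// the result remains a single interval, otherwise the range straddles the wrap point and must
// be dropped. The signed variant additionally reinterprets the truncated values as signed.
static std::optional<std::pair<uint64_t, uint64_t>> wrap_bounds_sketch(const uint64_t lb, const uint64_t ub,
                                                                       const int finite_width) {
    const uint64_t mask = finite_width == 64 ? ~uint64_t{0} : (uint64_t{1} << finite_width) - 1;
    const uint64_t lo = lb & mask; // e.g. lb = 0xFFFFFFF0, width 32 -> 0xFFFFFFF0
    const uint64_t hi = ub & mask; // e.g. ub = 0x100000010, width 32 -> 0x10
    if (lo > hi) {
        return std::nullopt; // the range wraps in the middle of the finite space
    }
    return std::make_pair(lo, hi);
}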

982
static void overflow_signed(NumAbsDomain& inv, const variable_t lhs, const int finite_width) {
77,556✔
983
    const auto span{finite_width == 64   ? number_t{std::numeric_limits<uint64_t>::max()}
77,556✔
984
                    : finite_width == 32 ? number_t{std::numeric_limits<uint32_t>::max()}
5,454✔
985
                                         : throw std::exception()};
77,556✔
986
    overflow_bounds(inv, lhs, span, finite_width, true);
116,334✔
987
}
77,556✔
988

989
static void overflow_unsigned(NumAbsDomain& inv, const variable_t lhs, const int finite_width) {
109,520✔
990
    const auto span{finite_width == 64   ? number_t{std::numeric_limits<uint64_t>::max()}
109,520✔
991
                    : finite_width == 32 ? number_t{std::numeric_limits<uint32_t>::max()}
11,368✔
992
                                         : throw std::exception()};
109,520✔
993
    overflow_bounds(inv, lhs, span, finite_width, false);
164,280✔
994
}
109,520✔
995
static void apply_signed(NumAbsDomain& inv, const binop_t& op, const variable_t xs, const variable_t xu,
31,158✔
996
                         const variable_t y, const number_t& z, const int finite_width) {
997
    inv.apply(op, xs, y, z, finite_width);
31,158✔
998
    if (finite_width) {
24,076✔
999
        inv.assign(xu, xs);
16,994✔
1000
        overflow_signed(inv, xs, finite_width);
16,994✔
1001
        overflow_unsigned(inv, xu, finite_width);
16,994✔
1002
    }
1003
}
24,076✔
1004

1005
static void apply_unsigned(NumAbsDomain& inv, const binop_t& op, const variable_t xs, const variable_t xu,
42,674✔
1006
                           const variable_t y, const number_t& z, const int finite_width) {
1007
    inv.apply(op, xu, y, z, finite_width);
42,674✔
1008
    if (finite_width) {
42,674✔
1009
        inv.assign(xs, xu);
42,674✔
1010
        overflow_signed(inv, xs, finite_width);
42,674✔
1011
        overflow_unsigned(inv, xu, finite_width);
42,674✔
1012
    }
1013
}
42,674✔
1014

1015
static void apply_signed(NumAbsDomain& inv, const binop_t& op, const variable_t xs, const variable_t xu,
3,382✔
1016
                         const variable_t y, const variable_t z, const int finite_width) {
1017
    inv.apply(op, xs, y, z, finite_width);
3,382✔
1018
    if (finite_width) {
3,373✔
1019
        inv.assign(xu, xs);
3,364✔
1020
        overflow_signed(inv, xs, finite_width);
3,364✔
1021
        overflow_unsigned(inv, xu, finite_width);
3,364✔
1022
    }
1023
}
3,373✔
1024

1025
static void apply_unsigned(NumAbsDomain& inv, const binop_t& op, const variable_t xs, const variable_t xu,
5,846✔
1026
                           const variable_t y, const variable_t z, const int finite_width) {
1027
    inv.apply(op, xu, y, z, finite_width);
5,846✔
1028
    if (finite_width) {
5,846✔
1029
        inv.assign(xs, xu);
5,846✔
1030
        overflow_signed(inv, xs, finite_width);
5,846✔
1031
        overflow_unsigned(inv, xu, finite_width);
5,846✔
1032
    }
1033
}
5,846✔
1034

1035
void ebpf_transformer::add(const variable_t lhs, const variable_t op2) {
×
1036
    apply_signed(m_inv, arith_binop_t::ADD, lhs, lhs, lhs, op2, 0);
×
1037
}
×
1038
void ebpf_transformer::add(const variable_t lhs, const number_t& op2) {
13,292✔
1039
    apply_signed(m_inv, arith_binop_t::ADD, lhs, lhs, lhs, op2, 0);
13,292✔
1040
}
13,292✔
1041
void ebpf_transformer::sub(const variable_t lhs, const variable_t op2) {
4✔
1042
    apply_signed(m_inv, arith_binop_t::SUB, lhs, lhs, lhs, op2, 0);
4✔
1043
}
4✔
1044
void ebpf_transformer::sub(const variable_t lhs, const number_t& op2) {
872✔
1045
    apply_signed(m_inv, arith_binop_t::SUB, lhs, lhs, lhs, op2, 0);
872✔
1046
}
872✔
1047

1048
// Add/subtract with overflow are both signed and unsigned. We can use either one of the two to compute the
1049
// result before adjusting for overflow, though if one is top we want to use the other to retain precision.
1050
void ebpf_transformer::add_overflow(const variable_t lhss, const variable_t lhsu, const variable_t op2,
2,180✔
1051
                                    const int finite_width) {
1052
    apply_signed(m_inv, arith_binop_t::ADD, lhss, lhsu, !m_inv.eval_interval(lhss).is_top() ? lhss : lhsu, op2,
4,360✔
1053
                 finite_width);
1054
}
2,180✔
1055
void ebpf_transformer::add_overflow(const variable_t lhss, const variable_t lhsu, const number_t& op2,
16,732✔
1056
                                    const int finite_width) {
1057
    apply_signed(m_inv, arith_binop_t::ADD, lhss, lhsu, !m_inv.eval_interval(lhss).is_top() ? lhss : lhsu, op2,
33,464✔
1058
                 finite_width);
1059
}
16,732✔
1060
void ebpf_transformer::sub_overflow(const variable_t lhss, const variable_t lhsu, const variable_t op2,
4✔
1061
                                    const int finite_width) {
1062
    apply_signed(m_inv, arith_binop_t::SUB, lhss, lhsu, !m_inv.eval_interval(lhss).is_top() ? lhss : lhsu, op2,
8✔
1063
                 finite_width);
1064
}
4✔
1065
void ebpf_transformer::sub_overflow(const variable_t lhss, const variable_t lhsu, const number_t& op2,
×
1066
                                    const int finite_width) {
1067
    apply_signed(m_inv, arith_binop_t::SUB, lhss, lhsu, !m_inv.eval_interval(lhss).is_top() ? lhss : lhsu, op2,
×
1068
                 finite_width);
1069
}
×
1070

1071
void ebpf_transformer::neg(const variable_t lhss, const variable_t lhsu, const int finite_width) {
120✔
1072
    apply_signed(m_inv, arith_binop_t::MUL, lhss, lhsu, lhss, -1, finite_width);
120✔
1073
}
120✔
1074
void ebpf_transformer::mul(const variable_t lhss, const variable_t lhsu, const variable_t op2, const int finite_width) {
74✔
1075
    apply_signed(m_inv, arith_binop_t::MUL, lhss, lhsu, lhss, op2, finite_width);
74✔
1076
}
74✔
1077
void ebpf_transformer::mul(const variable_t lhss, const variable_t lhsu, const number_t& op2, const int finite_width) {
90✔
1078
    apply_signed(m_inv, arith_binop_t::MUL, lhss, lhsu, lhss, op2, finite_width);
90✔
1079
}
90✔
1080
void ebpf_transformer::sdiv(const variable_t lhss, const variable_t lhsu, const variable_t op2,
36✔
1081
                            const int finite_width) {
1082
    apply_signed(m_inv, arith_binop_t::SDIV, lhss, lhsu, lhss, op2, finite_width);
36✔
1083
}
36✔
1084
void ebpf_transformer::sdiv(const variable_t lhss, const variable_t lhsu, const number_t& op2, const int finite_width) {
18✔
1085
    apply_signed(m_inv, arith_binop_t::SDIV, lhss, lhsu, lhss, op2, finite_width);
18✔
1086
}
18✔
1087
void ebpf_transformer::udiv(const variable_t lhss, const variable_t lhsu, const variable_t op2,
108✔
1088
                            const int finite_width) {
1089
    apply_unsigned(m_inv, arith_binop_t::UDIV, lhss, lhsu, lhsu, op2, finite_width);
108✔
1090
}
108✔
1091
void ebpf_transformer::udiv(const variable_t lhss, const variable_t lhsu, const number_t& op2, const int finite_width) {
1,130✔
1092
    apply_unsigned(m_inv, arith_binop_t::UDIV, lhss, lhsu, lhsu, op2, finite_width);
1,130✔
1093
}
1,130✔
1094
void ebpf_transformer::srem(const variable_t lhss, const variable_t lhsu, const variable_t op2,
48✔
1095
                            const int finite_width) {
1096
    apply_signed(m_inv, arith_binop_t::SREM, lhss, lhsu, lhss, op2, finite_width);
48✔
1097
}
48✔
1098
void ebpf_transformer::srem(const variable_t lhss, const variable_t lhsu, const number_t& op2, const int finite_width) {
34✔
1099
    apply_signed(m_inv, arith_binop_t::SREM, lhss, lhsu, lhss, op2, finite_width);
34✔
1100
}
34✔
1101
void ebpf_transformer::urem(const variable_t lhss, const variable_t lhsu, const variable_t op2,
36✔
1102
                            const int finite_width) {
1103
    apply_unsigned(m_inv, arith_binop_t::UREM, lhss, lhsu, lhsu, op2, finite_width);
36✔
1104
}
36✔
1105
void ebpf_transformer::urem(const variable_t lhss, const variable_t lhsu, const number_t& op2, const int finite_width) {
24✔
1106
    apply_unsigned(m_inv, arith_binop_t::UREM, lhss, lhsu, lhsu, op2, finite_width);
24✔
1107
}
24✔
1108

1109
void ebpf_transformer::bitwise_and(const variable_t lhss, const variable_t lhsu, const variable_t op2,
468✔
1110
                                   const int finite_width) {
1111
    apply_unsigned(m_inv, bitwise_binop_t::AND, lhss, lhsu, lhsu, op2, finite_width);
468✔
1112
}
468✔
1113
void ebpf_transformer::bitwise_and(const variable_t lhss, const variable_t lhsu, const number_t& op2) {
38,362✔
1114
    // Use finite width 64 to make the svalue be set as well as the uvalue.
1115
    apply_unsigned(m_inv, bitwise_binop_t::AND, lhss, lhsu, lhsu, op2, 64);
38,362✔
1116
}
38,362✔
1117
void ebpf_transformer::bitwise_or(const variable_t lhss, const variable_t lhsu, const variable_t op2,
4,440✔
1118
                                  const int finite_width) {
1119
    apply_unsigned(m_inv, bitwise_binop_t::OR, lhss, lhsu, lhsu, op2, finite_width);
4,440✔
1120
}
4,440✔
1121
void ebpf_transformer::bitwise_or(const variable_t lhss, const variable_t lhsu, const number_t& op2) {
1,542✔
1122
    apply_unsigned(m_inv, bitwise_binop_t::OR, lhss, lhsu, lhsu, op2, 64);
1,542✔
1123
}
1,542✔
1124
void ebpf_transformer::bitwise_xor(const variable_t lhss, const variable_t lhsu, const variable_t op2,
248✔
1125
                                   const int finite_width) {
1126
    apply_unsigned(m_inv, bitwise_binop_t::XOR, lhss, lhsu, lhsu, op2, finite_width);
248✔
1127
}
248✔
1128
void ebpf_transformer::bitwise_xor(const variable_t lhss, const variable_t lhsu, const number_t& op2) {
224✔
1129
    apply_unsigned(m_inv, bitwise_binop_t::XOR, lhss, lhsu, lhsu, op2, 64);
224✔
1130
}
224✔
1131
void ebpf_transformer::shl_overflow(const variable_t lhss, const variable_t lhsu, const variable_t op2) {
546✔
1132
    apply_unsigned(m_inv, bitwise_binop_t::SHL, lhss, lhsu, lhsu, op2, 64);
546✔
1133
}
546✔
1134
void ebpf_transformer::shl_overflow(const variable_t lhss, const variable_t lhsu, const number_t& op2) {
1,392✔
1135
    apply_unsigned(m_inv, bitwise_binop_t::SHL, lhss, lhsu, lhsu, op2, 64);
1,392✔
1136
}
1,392✔
1137
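
// Illustrative sketch (not part of the original source; helper name hypothetical, <cstdint>
// assumed): the bitwise helpers above compute on the unsigned view (lhsu) and then copy the
// result into the signed view, because AND/OR/XOR/SHL act on bit patterns, not on signedness.
// On concrete values the flow is simply:
static void bitwise_and_views_sketch(int64_t& svalue, uint64_t& uvalue, const uint64_t imm) {
    uvalue &= imm;                         // the operation itself acts on the raw bits
    svalue = static_cast<int64_t>(uvalue); // the signed view is the same bits, reinterpreted
}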

1138
static void assume(NumAbsDomain& inv, const linear_constraint_t& cst) { inv += cst; }
79,342✔
1139
void ebpf_transformer::assume(const linear_constraint_t& cst) { crab::assume(m_inv, cst); }
79,342✔
1140

1141
/// Forget everything we know about the value of a variable.
1142
void ebpf_transformer::havoc(const variable_t v) { m_inv -= v; }
360,946✔
1143
void ebpf_transformer::havoc_offsets(const Reg& reg) { crab::havoc_offsets(m_inv, reg); }
48,690✔
1144

1145
void ebpf_transformer::assign(const variable_t lhs, const variable_t rhs) { m_inv.assign(lhs, rhs); }
80,696✔
1146

1147
static linear_constraint_t type_is_pointer(const reg_pack_t& r) {
350✔
1148
    using namespace crab::dsl_syntax;
175✔
1149
    return r.type >= T_CTX;
525✔
1150
}
1151

1152
static linear_constraint_t type_is_number(const reg_pack_t& r) {
14,516✔
1153
    using namespace crab::dsl_syntax;
7,258✔
1154
    return r.type == T_NUM;
14,516✔
1155
}
1156

1157
static linear_constraint_t type_is_number(const Reg& r) { return type_is_number(reg_pack(r)); }
6,170✔
1158

1159
static linear_constraint_t type_is_not_stack(const reg_pack_t& r) {
334✔
1160
    using namespace crab::dsl_syntax;
167✔
1161
    return r.type != T_STACK;
334✔
1162
}
1163

1164
void ebpf_transformer::operator()(const Assume& s) {
36,882✔
1165
    const Condition cond = s.cond;
36,882✔
1166
    const auto dst = reg_pack(cond.left);
36,882✔
1167
    if (const auto psrc_reg = std::get_if<Reg>(&cond.right)) {
36,882✔
1168
        const auto src_reg = *psrc_reg;
8,084✔
1169
        const auto src = reg_pack(src_reg);
8,084✔
1170
        if (type_inv.same_type(m_inv, cond.left, std::get<Reg>(cond.right))) {
8,084✔
1171
            m_inv = type_inv.join_over_types(m_inv, cond.left, [&](NumAbsDomain& inv, const type_encoding_t type) {
16,104✔
1172
                if (type == T_NUM) {
8,056✔
1173
                    for (const linear_constraint_t& cst :
11,121✔
1174
                         assume_cst_reg(m_inv, cond.op, cond.is64, dst.svalue, dst.uvalue, src.svalue, src.uvalue)) {
19,024✔
1175
                        inv += cst;
9,370✔
1176
                    }
6,436✔
1177
                } else {
1178
                    // Either pointers to a singleton region,
1179
                    // or an equality comparison on map descriptors/pointers to non-singleton locations
1180
                    if (const auto dst_offset = dom.get_type_offset_variable(cond.left, type)) {
1,620✔
1181
                        if (const auto src_offset = dom.get_type_offset_variable(src_reg, type)) {
1,620✔
1182
                            inv += assume_cst_offsets_reg(cond.op, dst_offset.value(), src_offset.value());
2,430✔
1183
                        }
1184
                    }
1185
                }
1186
            });
16,108✔
1187
        } else {
1188
            // We should only reach here if `--assume-assert` is off
1189
            assert(!thread_local_options.assume_assertions || dom.is_bottom());
32✔
1190
            // To remain sound in any case, set the abstract state to top; this also happens to flush out bugs:
1191
            m_inv.set_to_top();
32✔
1192
        }
1193
    } else {
1194
        const int64_t imm = gsl::narrow_cast<int64_t>(std::get<Imm>(cond.right).v);
28,798✔
1195
        for (const linear_constraint_t& cst : assume_cst_imm(m_inv, cond.op, cond.is64, dst.svalue, dst.uvalue, imm)) {
91,156✔
1196
            assume(cst);
93,537✔
1197
        }
28,798✔
1198
    }
1199
}
36,882✔
1200
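
// Illustrative sketch (not part of the original source; the struct and helper below are
// hypothetical, <optional> and <cstdint> assumed): the numeric branch of the Assume handler
// above refines the register's interval with the jump condition. For a guard such as
// "r < imm" on the unsigned view, the refinement amounts to clamping the upper bound:
struct u64_interval_sketch {
    uint64_t lb, ub; // inclusive bounds of the unsigned view
};
static std::optional<u64_interval_sketch> assume_ult_sketch(u64_interval_sketch i, const uint64_t imm) {
    if (imm == 0 || i.lb >= imm) {
        return std::nullopt; // no value in the interval satisfies the guard: the branch is dead
    }
    i.ub = i.ub < imm - 1 ? i.ub : imm - 1; // clamp the upper bound to imm - 1
    return i;
}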

1201
void ebpf_transformer::operator()(const Undefined& a) {}
2,368✔
1202

1203
// Simple truncation function usable with swap_endianness().
1204
template <class T>
1205
constexpr T truncate(T x) noexcept {
24✔
1206
    return x;
24✔
1207
}
1208

1209
void ebpf_transformer::operator()(const Un& stmt) {
1,812✔
1210
    const auto dst = reg_pack(stmt.dst);
1,812✔
1211
    auto swap_endianness = [&](const variable_t v, auto be_or_le) {
4,274✔
1212
        if (m_inv.entail(type_is_number(stmt.dst))) {
5,052✔
1213
            if (const auto n = m_inv.eval_interval(v).singleton()) {
6,786✔
1214
                if (n->fits_cast_to<int64_t>()) {
244✔
1215
                    m_inv.set(v, interval_t{be_or_le(n->cast_to<int64_t>())});
244✔
1216
                    return;
244✔
1217
                }
1218
            }
1219
        }
1220
        havoc(v);
3,124✔
1221
        havoc_offsets(stmt.dst);
3,124✔
1222
    };
1,812✔
1223
    // Swap bytes if needed.  For 64-bit types we need the weights to fit in a
1224
    // signed int64, but for smaller types we don't want sign extension,
1225
    // so we use unsigned which still fits in a signed int64.
1226
    switch (stmt.op) {
1,812✔
1227
    case Un::Op::BE16:
1,536✔
1228
        if (!thread_local_options.big_endian) {
1,536✔
1229
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
1,532✔
1230
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
1,532✔
1231
        } else {
1232
            swap_endianness(dst.svalue, truncate<uint16_t>);
4✔
1233
            swap_endianness(dst.uvalue, truncate<uint16_t>);
4✔
1234
        }
1235
        break;
768✔
1236
    case Un::Op::BE32:
96✔
1237
        if (!thread_local_options.big_endian) {
96✔
1238
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
94✔
1239
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
94✔
1240
        } else {
1241
            swap_endianness(dst.svalue, truncate<uint32_t>);
2✔
1242
            swap_endianness(dst.uvalue, truncate<uint32_t>);
2✔
1243
        }
1244
        break;
48✔
1245
    case Un::Op::BE64:
10✔
1246
        if (!thread_local_options.big_endian) {
10✔
1247
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
8✔
1248
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
8✔
1249
        }
1250
        break;
5✔
1251
    case Un::Op::LE16:
12✔
1252
        if (thread_local_options.big_endian) {
12✔
1253
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
2✔
1254
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
2✔
1255
        } else {
1256
            swap_endianness(dst.svalue, truncate<uint16_t>);
10✔
1257
            swap_endianness(dst.uvalue, truncate<uint16_t>);
10✔
1258
        }
1259
        break;
6✔
1260
    case Un::Op::LE32:
8✔
1261
        if (thread_local_options.big_endian) {
8✔
1262
            swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
2✔
1263
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
2✔
1264
        } else {
1265
            swap_endianness(dst.svalue, truncate<uint32_t>);
6✔
1266
            swap_endianness(dst.uvalue, truncate<uint32_t>);
6✔
1267
        }
1268
        break;
4✔
1269
    case Un::Op::LE64:
8✔
1270
        if (thread_local_options.big_endian) {
8✔
1271
            swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
2✔
1272
            swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
2✔
1273
        }
1274
        break;
4✔
1275
    case Un::Op::SWAP16:
10✔
1276
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint16_t>);
10✔
1277
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint16_t>);
10✔
1278
        break;
5✔
1279
    case Un::Op::SWAP32:
6✔
1280
        swap_endianness(dst.svalue, boost::endian::endian_reverse<uint32_t>);
6✔
1281
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint32_t>);
6✔
1282
        break;
3✔
1283
    case Un::Op::SWAP64:
6✔
1284
        swap_endianness(dst.svalue, boost::endian::endian_reverse<int64_t>);
6✔
1285
        swap_endianness(dst.uvalue, boost::endian::endian_reverse<uint64_t>);
6✔
1286
        break;
3✔
1287
    case Un::Op::NEG:
120✔
1288
        neg(dst.svalue, dst.uvalue, stmt.is64 ? 64 : 32);
132✔
1289
        havoc_offsets(stmt.dst);
120✔
1290
        break;
60✔
1291
    }
1292
}
1,812✔
1293
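
// Illustrative sketch (not part of the original source; helper name hypothetical): when the
// operand is a known singleton, the endianness handler above folds the byte swap at
// verification time with boost::endian::endian_reverse (already included at the top of this
// file). For example, a BE16 of the constant 0x1234 evaluated on a little-endian host:
static uint64_t be16_of_constant_sketch() {
    const uint16_t host_value = 0x1234;
    const uint16_t swapped = boost::endian::endian_reverse(host_value); // 0x3412
    return swapped; // widened without sign extension, matching the uint16_t weight choice above
}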

1294
void ebpf_transformer::operator()(const Exit& a) {
1,220✔
1295
    // Clean up any state for the current stack frame.
1296
    const std::string prefix = a.stack_frame_prefix;
1,220✔
1297
    if (prefix.empty()) {
1,220✔
1298
        return;
1,162✔
1299
    }
1300
    havoc_subprogram_stack(prefix);
58✔
1301
    restore_callee_saved_registers(prefix);
58✔
1302

1303
    // Restore r10.
1304
    constexpr Reg r10_reg{R10_STACK_POINTER};
58✔
1305
    add(r10_reg, EBPF_SUBPROGRAM_STACK_SIZE, 64);
58✔
1306
}
1,220✔
1307

1308
void ebpf_transformer::operator()(const Jmp&) const {
10,844✔
1309
    // This is a NOP. It only exists to hold the jump preconditions.
1310
}
10,844✔
1311

1312
void ebpf_transformer::operator()(const Packet& a) {
224✔
1313
    const auto reg = reg_pack(R0_RETURN_VALUE);
224✔
1314
    constexpr Reg r0_reg{R0_RETURN_VALUE};
224✔
1315
    type_inv.assign_type(m_inv, r0_reg, T_NUM);
224✔
1316
    havoc_offsets(r0_reg);
224✔
1317
    havoc(reg.svalue);
224✔
1318
    havoc(reg.uvalue);
224✔
1319
    scratch_caller_saved_registers();
224✔
1320
}
224✔
1321

1322
void ebpf_transformer::do_load_stack(NumAbsDomain& inv, const Reg& target_reg, const linear_expression_t& addr,
9,846✔
1323
                                     const int width, const Reg& src_reg) {
1324
    type_inv.assign_type(inv, target_reg, stack.load(inv, data_kind_t::types, addr, width));
9,846✔
1325
    using namespace crab::dsl_syntax;
4,923✔
1326
    if (inv.entail(width <= reg_pack(src_reg).stack_numeric_size)) {
19,692✔
1327
        type_inv.assign_type(inv, target_reg, T_NUM);
357✔
1328
    }
1329

1330
    const reg_pack_t& target = reg_pack(target_reg);
9,846✔
1331
    if (width == 1 || width == 2 || width == 4 || width == 8) {
9,846✔
1332
        // Use the addr before we havoc the destination register since we might be getting the
1333
        // addr from that same register.
1334
        const std::optional<linear_expression_t> sresult = stack.load(inv, data_kind_t::svalues, addr, width);
9,846✔
1335
        const std::optional<linear_expression_t> uresult = stack.load(inv, data_kind_t::uvalues, addr, width);
9,846✔
1336
        havoc_register(inv, target_reg);
9,846✔
1337
        inv.assign(target.svalue, sresult);
9,846✔
1338
        inv.assign(target.uvalue, uresult);
9,846✔
1339

1340
        if (type_inv.has_type(inv, target.type, T_CTX)) {
9,846✔
1341
            inv.assign(target.ctx_offset, stack.load(inv, data_kind_t::ctx_offsets, addr, width));
2,268✔
1342
        }
1343
        if (type_inv.has_type(inv, target.type, T_MAP) || type_inv.has_type(inv, target.type, T_MAP_PROGRAMS)) {
9,846✔
1344
            inv.assign(target.map_fd, stack.load(inv, data_kind_t::map_fds, addr, width));
267✔
1345
        }
1346
        if (type_inv.has_type(inv, target.type, T_PACKET)) {
9,846✔
1347
            inv.assign(target.packet_offset, stack.load(inv, data_kind_t::packet_offsets, addr, width));
156✔
1348
        }
1349
        if (type_inv.has_type(inv, target.type, T_SHARED)) {
9,846✔
1350
            inv.assign(target.shared_offset, stack.load(inv, data_kind_t::shared_offsets, addr, width));
1,119✔
1351
            inv.assign(target.shared_region_size, stack.load(inv, data_kind_t::shared_region_sizes, addr, width));
1,119✔
1352
        }
1353
        if (type_inv.has_type(inv, target.type, T_STACK)) {
9,846✔
1354
            inv.assign(target.stack_offset, stack.load(inv, data_kind_t::stack_offsets, addr, width));
165✔
1355
            inv.assign(target.stack_numeric_size, stack.load(inv, data_kind_t::stack_numeric_sizes, addr, width));
165✔
1356
        }
1357
    } else {
9,846✔
1358
        havoc_register(inv, target_reg);
×
1359
    }
1360
}
9,846✔
1361

1362
void ebpf_transformer::do_load_ctx(NumAbsDomain& inv, const Reg& target_reg, const linear_expression_t& addr_vague,
3,016✔
1363
                                   const int width) {
1364
    using namespace crab::dsl_syntax;
1,508✔
1365
    if (inv.is_bottom()) {
3,016✔
1366
        return;
1,420✔
1367
    }
1368

1369
    const ebpf_context_descriptor_t* desc = thread_local_program_info->type.context_descriptor;
3,016✔
1370

1371
    const reg_pack_t& target = reg_pack(target_reg);
3,016✔
1372

1373
    if (desc->end < 0) {
3,016✔
1374
        havoc_register(inv, target_reg);
290✔
1375
        type_inv.assign_type(inv, target_reg, T_NUM);
290✔
1376
        return;
290✔
1377
    }
1378

1379
    const interval_t interval = inv.eval_interval(addr_vague);
2,726✔
1380
    const std::optional<number_t> maybe_addr = interval.singleton();
2,726✔
1381
    havoc_register(inv, target_reg);
2,726✔
1382

1383
    const bool may_touch_ptr =
1,363✔
1384
        interval.contains(desc->data) || interval.contains(desc->meta) || interval.contains(desc->end);
4,649✔
1385

1386
    if (!maybe_addr) {
2,726✔
1387
        if (may_touch_ptr) {
2✔
1388
            type_inv.havoc_type(inv, target_reg);
2✔
1389
        } else {
1390
            type_inv.assign_type(inv, target_reg, T_NUM);
×
1391
        }
1392
        return;
2✔
1393
    }
1394

1395
    const number_t addr = *maybe_addr;
2,724✔
1396

1397
    // We use offsets for packet data, data_end, and meta during verification,
1398
    // but at runtime they will be 64-bit pointers.  We can use the offset values
1399
    // for verification like we use map_fd's as a proxy for maps which
1400
    // at runtime are actually 64-bit memory pointers.
1401
    const int offset_width = desc->end - desc->data;
2,724✔
1402
    if (addr == desc->data) {
2,724✔
1403
        if (width == offset_width) {
794✔
1404
            inv.assign(target.packet_offset, 0);
1,191✔
1405
        }
1406
    } else if (addr == desc->end) {
1,930✔
1407
        if (width == offset_width) {
788✔
1408
            inv.assign(target.packet_offset, variable_t::packet_size());
1,176✔
1409
        }
1410
    } else if (addr == desc->meta) {
1,142✔
1411
        if (width == offset_width) {
14✔
1412
            inv.assign(target.packet_offset, variable_t::meta_offset());
21✔
1413
        }
1414
    } else {
1415
        if (may_touch_ptr) {
1,128✔
1416
            type_inv.havoc_type(inv, target_reg);
×
1417
        } else {
1418
            type_inv.assign_type(inv, target_reg, T_NUM);
1,692✔
1419
        }
1420
        return;
1,128✔
1421
    }
1422
    if (width == offset_width) {
1,596✔
1423
        type_inv.assign_type(inv, target_reg, T_PACKET);
1,592✔
1424
        inv += 4098 <= target.svalue;
3,184✔
1425
        inv += target.svalue <= PTR_MAX;
2,388✔
1426
    }
1427
}
5,782✔
1428
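
// Illustrative sketch (not part of the original source; the descriptor struct, enum and
// helper below are hypothetical): do_load_ctx() above compares the singleton load address
// against the context descriptor's data/end/meta member offsets and, on a match, assigns
// the corresponding packet offset. The classification itself is just:
struct ctx_descriptor_sketch {
    int data; // byte offset of the packet-data pointer inside the context, or -1
    int end;  // byte offset of the packet-end pointer inside the context, or -1
    int meta; // byte offset of the packet-meta pointer inside the context, or -1
};
enum class ctx_field_sketch { data, end, meta, scalar };
static ctx_field_sketch classify_ctx_load_sketch(const ctx_descriptor_sketch& desc, const int addr) {
    if (addr == desc.data) { return ctx_field_sketch::data; } // yields a packet pointer at offset 0
    if (addr == desc.end)  { return ctx_field_sketch::end; }  // yields a pointer at packet_size
    if (addr == desc.meta) { return ctx_field_sketch::meta; } // yields a pointer at meta_offset
    return ctx_field_sketch::scalar;                          // any other field is treated as a number
}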

1429
void ebpf_transformer::do_load_packet_or_shared(NumAbsDomain& inv, const Reg& target_reg,
8,958✔
1430
                                                const linear_expression_t& addr, const int width) {
1431
    if (inv.is_bottom()) {
8,958✔
1432
        return;
×
1433
    }
1434
    const reg_pack_t& target = reg_pack(target_reg);
8,958✔
1435

1436
    type_inv.assign_type(inv, target_reg, T_NUM);
8,958✔
1437
    havoc_register(inv, target_reg);
8,958✔
1438

1439
    // A 1 or 2 byte copy results in a limited range of values that may be used as array indices.
1440
    if (width == 1) {
8,958✔
1441
        const interval_t full = interval_t::full<uint8_t>();
3,326✔
1442
        inv.set(target.svalue, full);
3,326✔
1443
        inv.set(target.uvalue, full);
3,326✔
1444
    } else if (width == 2) {
8,958✔
1445
        const interval_t full = interval_t::full<uint16_t>();
1,978✔
1446
        inv.set(target.svalue, full);
1,978✔
1447
        inv.set(target.uvalue, full);
1,978✔
1448
    }
1,978✔
1449
}
1450
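
// Illustrative sketch (not part of the original source; helper name hypothetical, <utility>
// and <cstdint> assumed): a 1- or 2-byte load from packet or shared memory is unknown but
// bounded, so the handler above assigns the full range of the loaded width instead of
// forgetting the register entirely. The concrete bounds are:
static std::pair<uint64_t, uint64_t> loaded_value_range_sketch(const int width) {
    switch (width) {
    case 1: return {0, 0xFF};          // full uint8_t range
    case 2: return {0, 0xFFFF};        // full uint16_t range
    default: return {0, ~uint64_t{0}}; // wider loads are left unconstrained
    }
}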

1451
void ebpf_transformer::do_load(const Mem& b, const Reg& target_reg) {
21,832✔
1452
    using namespace crab::dsl_syntax;
10,916✔
1453

1454
    const auto mem_reg = reg_pack(b.access.basereg);
21,832✔
1455
    const int width = b.access.width;
21,832✔
1456
    const int offset = b.access.offset;
21,832✔
1457

1458
    if (b.access.basereg.v == R10_STACK_POINTER) {
21,832✔
1459
        const linear_expression_t addr = mem_reg.stack_offset + offset;
14,313✔
1460
        do_load_stack(m_inv, target_reg, addr, width, b.access.basereg);
9,542✔
1461
        return;
9,542✔
1462
    }
9,542✔
1463

1464
    m_inv = type_inv.join_over_types(m_inv, b.access.basereg, [&](NumAbsDomain& inv, type_encoding_t type) {
24,580✔
1465
        switch (type) {
12,308✔
1466
        case T_UNINIT: return;
15✔
1467
        case T_MAP: return;
1468
        case T_MAP_PROGRAMS: return;
1469
        case T_NUM: return;
1470
        case T_CTX: {
3,016✔
1471
            linear_expression_t addr = mem_reg.ctx_offset + offset;
4,524✔
1472
            do_load_ctx(inv, target_reg, addr, width);
3,016✔
1473
            break;
3,016✔
1474
        }
3,016✔
1475
        case T_STACK: {
304✔
1476
            linear_expression_t addr = mem_reg.stack_offset + offset;
456✔
1477
            do_load_stack(inv, target_reg, addr, width, b.access.basereg);
304✔
1478
            break;
304✔
1479
        }
304✔
1480
        case T_PACKET: {
2,918✔
1481
            linear_expression_t addr = mem_reg.packet_offset + offset;
4,377✔
1482
            do_load_packet_or_shared(inv, target_reg, addr, width);
2,918✔
1483
            break;
2,918✔
1484
        }
2,918✔
1485
        default: {
6,040✔
1486
            linear_expression_t addr = mem_reg.shared_offset + offset;
9,060✔
1487
            do_load_packet_or_shared(inv, target_reg, addr, width);
6,040✔
1488
            break;
6,040✔
1489
        }
6,040✔
1490
        }
1491
    });
12,290✔
1492
}
1493

1494
void ebpf_transformer::do_store_stack(NumAbsDomain& inv, const linear_expression_t& addr, const int width,
24,206✔
1495
                                      const linear_expression_t& val_type, const linear_expression_t& val_svalue,
1496
                                      const linear_expression_t& val_uvalue,
1497
                                      const std::optional<reg_pack_t>& opt_val_reg) {
1498
    {
12,103✔
1499
        const std::optional<variable_t> var = stack.store_type(inv, addr, width, val_type);
24,206✔
1500
        type_inv.assign_type(inv, var, val_type);
24,206✔
1501
    }
1502
    if (width == 8) {
24,206✔
1503
        inv.assign(stack.store(inv, data_kind_t::svalues, addr, width, val_svalue), val_svalue);
9,714✔
1504
        inv.assign(stack.store(inv, data_kind_t::uvalues, addr, width, val_uvalue), val_uvalue);
9,714✔
1505

1506
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_CTX)) {
9,714✔
1507
            inv.assign(stack.store(inv, data_kind_t::ctx_offsets, addr, width, opt_val_reg->ctx_offset),
399✔
1508
                       opt_val_reg->ctx_offset);
266✔
1509
        } else {
1510
            stack.havoc(inv, data_kind_t::ctx_offsets, addr, width);
9,448✔
1511
        }
1512

1513
        if (opt_val_reg &&
19,418✔
1514
            (type_inv.has_type(m_inv, val_type, T_MAP) || type_inv.has_type(m_inv, val_type, T_MAP_PROGRAMS))) {
19,379✔
1515
            inv.assign(stack.store(inv, data_kind_t::map_fds, addr, width, opt_val_reg->map_fd), opt_val_reg->map_fd);
117✔
1516
        } else {
1517
            stack.havoc(inv, data_kind_t::map_fds, addr, width);
9,636✔
1518
        }
1519

1520
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_PACKET)) {
9,714✔
1521
            inv.assign(stack.store(inv, data_kind_t::packet_offsets, addr, width, opt_val_reg->packet_offset),
30✔
1522
                       opt_val_reg->packet_offset);
20✔
1523
        } else {
1524
            stack.havoc(inv, data_kind_t::packet_offsets, addr, width);
9,694✔
1525
        }
1526

1527
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_SHARED)) {
9,714✔
1528
            inv.assign(stack.store(inv, data_kind_t::shared_offsets, addr, width, opt_val_reg->shared_offset),
171✔
1529
                       opt_val_reg->shared_offset);
114✔
1530
            inv.assign(stack.store(inv, data_kind_t::shared_region_sizes, addr, width, opt_val_reg->shared_region_size),
171✔
1531
                       opt_val_reg->shared_region_size);
114✔
1532
        } else {
1533
            stack.havoc(inv, data_kind_t::shared_region_sizes, addr, width);
9,600✔
1534
            stack.havoc(inv, data_kind_t::shared_offsets, addr, width);
9,600✔
1535
        }
1536

1537
        if (opt_val_reg && type_inv.has_type(m_inv, val_type, T_STACK)) {
9,714✔
1538
            inv.assign(stack.store(inv, data_kind_t::stack_offsets, addr, width, opt_val_reg->stack_offset),
39✔
1539
                       opt_val_reg->stack_offset);
26✔
1540
            inv.assign(stack.store(inv, data_kind_t::stack_numeric_sizes, addr, width, opt_val_reg->stack_numeric_size),
39✔
1541
                       opt_val_reg->stack_numeric_size);
26✔
1542
        } else {
1543
            stack.havoc(inv, data_kind_t::stack_offsets, addr, width);
9,688✔
1544
            stack.havoc(inv, data_kind_t::stack_numeric_sizes, addr, width);
9,688✔
1545
        }
1546
    } else {
1547
        if ((width == 1 || width == 2 || width == 4) && type_inv.get_type(m_inv, val_type) == T_NUM) {
14,492✔
1548
            // Keep track of numbers on the stack that might be used as array indices.
1549
            inv.assign(stack.store(inv, data_kind_t::svalues, addr, width, val_svalue), val_svalue);
14,362✔
1550
            inv.assign(stack.store(inv, data_kind_t::uvalues, addr, width, val_uvalue), val_uvalue);
14,362✔
1551
        } else {
1552
            stack.havoc(inv, data_kind_t::svalues, addr, width);
130✔
1553
            stack.havoc(inv, data_kind_t::uvalues, addr, width);
130✔
1554
        }
1555
        stack.havoc(inv, data_kind_t::ctx_offsets, addr, width);
14,492✔
1556
        stack.havoc(inv, data_kind_t::map_fds, addr, width);
14,492✔
1557
        stack.havoc(inv, data_kind_t::packet_offsets, addr, width);
14,492✔
1558
        stack.havoc(inv, data_kind_t::shared_offsets, addr, width);
14,492✔
1559
        stack.havoc(inv, data_kind_t::stack_offsets, addr, width);
14,492✔
1560
        stack.havoc(inv, data_kind_t::shared_region_sizes, addr, width);
14,492✔
1561
        stack.havoc(inv, data_kind_t::stack_numeric_sizes, addr, width);
14,492✔
1562
    }
1563

1564
    // Update stack_numeric_size for any stack type variables.
1565
    // stack_numeric_size holds the number of contiguous bytes, starting at stack_offset, that are known to be numeric.
1566
    auto updated_lb = m_inv.eval_interval(addr).lb();
36,309✔
1567
    auto updated_ub = m_inv.eval_interval(addr).ub() + width;
60,515✔
1568
    for (const variable_t type_variable : variable_t::get_type_variables()) {
1,320,794✔
1569
        if (!type_inv.has_type(inv, type_variable, T_STACK)) {
1,296,588✔
1570
            continue;
667,923✔
1571
        }
1572
        const variable_t stack_offset_variable = variable_t::kind_var(data_kind_t::stack_offsets, type_variable);
628,665✔
1573
        const variable_t stack_numeric_size_variable =
314,333✔
1574
            variable_t::kind_var(data_kind_t::stack_numeric_sizes, type_variable);
628,665✔
1575

1576
        using namespace crab::dsl_syntax;
314,333✔
1577
        // See if the variable's numeric interval overlaps with changed bytes.
1578
        if (m_inv.intersect(dsl_syntax::operator<=(addr, stack_offset_variable + stack_numeric_size_variable)) &&
1,885,510✔
1579
            m_inv.intersect(operator>=(addr + width, stack_offset_variable))) {
1,256,360✔
1580
            havoc(stack_numeric_size_variable);
603,751✔
1581
            recompute_stack_numeric_size(m_inv, type_variable);
603,751✔
1582
        }
1583
    }
24,206✔
1584
}
24,206✔
1585
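
// Illustrative sketch (not part of the original source; helper name hypothetical): the loop
// above invalidates stack_numeric_size for any stack pointer whose numeric prefix may
// overlap the bytes just written. With singleton values the conservative test mirrors the
// two inclusive comparisons used above:
static bool store_may_touch_numeric_prefix_sketch(const int64_t store_addr, const int64_t store_width,
                                                  const int64_t stack_offset, const int64_t numeric_size) {
    // Written bytes:  [store_addr, store_addr + store_width]
    // Numeric prefix: [stack_offset, stack_offset + numeric_size]
    return store_addr <= stack_offset + numeric_size && store_addr + store_width >= stack_offset;
}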

1586
void ebpf_transformer::operator()(const Mem& b) {
50,338✔
1587
    if (m_inv.is_bottom()) {
50,338✔
1588
        return;
1589
    }
1590
    if (const auto preg = std::get_if<Reg>(&b.value)) {
50,338✔
1591
        if (b.is_load) {
50,302✔
1592
            do_load(b, *preg);
21,832✔
1593
        } else {
1594
            const auto data_reg = reg_pack(*preg);
28,470✔
1595
            do_mem_store(b, data_reg.type, data_reg.svalue, data_reg.uvalue, data_reg);
28,470✔
1596
        }
1597
    } else {
1598
        const uint64_t imm = std::get<Imm>(b.value).v;
36✔
1599
        do_mem_store(b, T_NUM, to_signed(imm), imm, {});
36✔
1600
    }
1601
}
1602

1603
void ebpf_transformer::do_mem_store(const Mem& b, const linear_expression_t& val_type,
28,506✔
1604
                                    const linear_expression_t& val_svalue, const linear_expression_t& val_uvalue,
1605
                                    const std::optional<reg_pack_t>& opt_val_reg) {
1606
    if (m_inv.is_bottom()) {
28,506✔
1607
        return;
24,064✔
1608
    }
1609
    const int width = b.access.width;
28,506✔
1610
    const number_t offset{b.access.offset};
28,506✔
1611
    if (b.access.basereg.v == R10_STACK_POINTER) {
28,506✔
1612
        const auto r10_stack_offset = reg_pack(b.access.basereg).stack_offset;
24,064✔
1613
        const auto r10_interval = m_inv.eval_interval(r10_stack_offset);
24,064✔
1614
        if (r10_interval.is_singleton()) {
24,064✔
1615
            const int32_t stack_offset = r10_interval.singleton()->cast_to<int32_t>();
24,060✔
1616
            const number_t base_addr{stack_offset};
24,060✔
1617
            do_store_stack(m_inv, base_addr + offset, width, val_type, val_svalue, val_uvalue, opt_val_reg);
36,090✔
1618
        }
24,060✔
1619
        return;
24,064✔
1620
    }
24,064✔
1621
    m_inv = type_inv.join_over_types(m_inv, b.access.basereg, [&](NumAbsDomain& inv, const type_encoding_t type) {
8,884✔
1622
        if (type == T_STACK) {
4,442✔
1623
            const auto base_addr = linear_expression_t(dom.get_type_offset_variable(b.access.basereg, type).value());
219✔
1624
            do_store_stack(inv, dsl_syntax::operator+(base_addr, offset), width, val_type, val_svalue, val_uvalue,
219✔
1625
                           opt_val_reg);
1626
        }
146✔
1627
        // do nothing for any other type
1628
    });
8,884✔
1629
}
28,506✔
1630

1631
// Construct a Bin operation that does the main operation that a given Atomic operation does atomically.
1632
static Bin atomic_to_bin(const Atomic& a) {
84✔
1633
    Bin bin{.dst = Reg{R11_ATOMIC_SCRATCH}, .v = a.valreg, .is64 = a.access.width == sizeof(uint64_t), .lddw = false};
84✔
1634
    switch (a.op) {
84✔
1635
    case Atomic::Op::ADD: bin.op = Bin::Op::ADD; break;
16✔
1636
    case Atomic::Op::OR: bin.op = Bin::Op::OR; break;
16✔
1637
    case Atomic::Op::AND: bin.op = Bin::Op::AND; break;
16✔
1638
    case Atomic::Op::XOR: bin.op = Bin::Op::XOR; break;
16✔
1639
    case Atomic::Op::XCHG:
10✔
1640
    case Atomic::Op::CMPXCHG: bin.op = Bin::Op::MOV; break;
10✔
1641
    default: throw std::exception();
×
1642
    }
1643
    return bin;
84✔
1644
}
1645

1646
void ebpf_transformer::operator()(const Atomic& a) {
350✔
1647
    if (m_inv.is_bottom()) {
350✔
1648
        return;
266✔
1649
    }
1650
    if (!m_inv.entail(type_is_pointer(reg_pack(a.access.basereg))) ||
1,223✔
1651
        !m_inv.entail(type_is_number(reg_pack(a.valreg)))) {
865✔
1652
        return;
8✔
1653
    }
1654
    if (m_inv.entail(type_is_not_stack(reg_pack(a.access.basereg)))) {
501✔
1655
        // Shared memory regions are volatile so we can just havoc
1656
        // any register that will be updated.
1657
        if (a.op == Atomic::Op::CMPXCHG) {
250✔
1658
            havoc_register(m_inv, Reg{R0_RETURN_VALUE});
4✔
1659
        } else if (a.fetch) {
246✔
1660
            havoc_register(m_inv, a.valreg);
20✔
1661
        }
1662
        return;
250✔
1663
    }
1664

1665
    // Fetch the current value into the R11 pseudo-register.
1666
    constexpr Reg r11{R11_ATOMIC_SCRATCH};
84✔
1667
    (*this)(Mem{.access = a.access, .value = r11, .is_load = true});
84✔
1668

1669
    // Compute the new value in R11.
1670
    (*this)(atomic_to_bin(a));
84✔
1671

1672
    if (a.op == Atomic::Op::CMPXCHG) {
84✔
1673
        // For CMPXCHG, store the original value in r0.
1674
        (*this)(Mem{.access = a.access, .value = Reg{R0_RETURN_VALUE}, .is_load = true});
12✔
1675

1676
        // For the destination, there are 3 possibilities:
1677
        // 1) dst.value == r0.value : set R11 to valreg
1678
        // 2) dst.value != r0.value : don't modify R11
1679
        // 3) dst.value may or may not == r0.value : set R11 to the union of R11 and valreg
1680
        // For now we just havoc the value of R11.
1681
        havoc_register(m_inv, r11);
12✔
1682
    } else if (a.fetch) {
72✔
1683
        // For other FETCH operations, store the original value in the src register.
1684
        (*this)(Mem{.access = a.access, .value = a.valreg, .is_load = true});
40✔
1685
    }
1686

1687
    // Store the new value back in the original shared memory location.
1688
    // Note that do_mem_store() currently doesn't track shared memory values,
1689
    // but stack memory values are tracked and are legal here.
1690
    (*this)(Mem{.access = a.access, .value = r11, .is_load = false});
84✔
1691

1692
    // Clear the R11 pseudo-register.
1693
    havoc_register(m_inv, r11);
84✔
1694
    type_inv.havoc_type(m_inv, r11);
84✔
1695
}
1696
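
// Illustrative sketch (not part of the original source; helper name hypothetical): the
// concrete semantics that the CMPXCHG case above over-approximates. At runtime the stored
// value depends on whether the memory word equals r0, which the analysis does not track
// precisely, so it havocs the scratch register instead of choosing between the outcomes.
static uint64_t cmpxchg_concrete_sketch(uint64_t& mem_word, const uint64_t expected_r0, const uint64_t new_value) {
    const uint64_t original = mem_word;
    if (mem_word == expected_r0) {
        mem_word = new_value; // case 1: values match, the new value is stored
    }                         // case 2: values differ, memory is left unchanged
    return original;          // the original value is returned in r0 either way
}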

1697
void ebpf_transformer::operator()(const Call& call) {
13,508✔
1698
    using namespace crab::dsl_syntax;
6,754✔
1699
    if (m_inv.is_bottom()) {
13,508✔
1700
        return;
×
1701
    }
1702
    std::optional<Reg> maybe_fd_reg{};
13,508✔
1703
    for (ArgSingle param : call.singles) {
45,350✔
1704
        switch (param.kind) {
31,842✔
1705
        case ArgSingle::Kind::MAP_FD: maybe_fd_reg = param.reg; break;
23,377✔
1706
        case ArgSingle::Kind::ANYTHING:
12,193✔
1707
        case ArgSingle::Kind::MAP_FD_PROGRAMS:
1708
        case ArgSingle::Kind::PTR_TO_MAP_KEY:
1709
        case ArgSingle::Kind::PTR_TO_MAP_VALUE:
1710
        case ArgSingle::Kind::PTR_TO_CTX:
1711
            // Do nothing. We don't track the content of relevant memory regions
1712
            break;
12,193✔
1713
        }
1714
    }
1715
    for (ArgPair param : call.pairs) {
17,494✔
1716
        switch (param.kind) {
3,986✔
1717
        case ArgPair::Kind::PTR_TO_READABLE_MEM_OR_NULL:
1,564✔
1718
        case ArgPair::Kind::PTR_TO_READABLE_MEM:
1719
            // Do nothing. No side effect allowed.
1720
            break;
1,564✔
1721

1722
        case ArgPair::Kind::PTR_TO_WRITABLE_MEM: {
858✔
1723
            bool store_numbers = true;
858✔
1724
            auto variable = dom.get_type_offset_variable(param.mem);
858✔
1725
            if (!variable.has_value()) {
858✔
1726
                // checked by the checker
1727
                break;
1728
            }
1729
            variable_t addr = variable.value();
858✔
1730
            variable_t width = reg_pack(param.size).svalue;
858✔
1731

1732
            m_inv = type_inv.join_over_types(m_inv, param.mem, [&](NumAbsDomain& inv, const type_encoding_t type) {
1,716✔
1733
                if (type == T_STACK) {
858✔
1734
                    // Pointer to a memory region that the called function may change,
1735
                    // so we must havoc.
1736
                    stack.havoc(inv, data_kind_t::types, addr, width);
846✔
1737
                    stack.havoc(inv, data_kind_t::svalues, addr, width);
846✔
1738
                    stack.havoc(inv, data_kind_t::uvalues, addr, width);
846✔
1739
                    stack.havoc(inv, data_kind_t::ctx_offsets, addr, width);
846✔
1740
                    stack.havoc(inv, data_kind_t::map_fds, addr, width);
846✔
1741
                    stack.havoc(inv, data_kind_t::packet_offsets, addr, width);
846✔
1742
                    stack.havoc(inv, data_kind_t::shared_offsets, addr, width);
846✔
1743
                    stack.havoc(inv, data_kind_t::stack_offsets, addr, width);
846✔
1744
                    stack.havoc(inv, data_kind_t::shared_region_sizes, addr, width);
846✔
1745
                } else {
1746
                    store_numbers = false;
12✔
1747
                }
1748
            });
1,716✔
1749
            if (store_numbers) {
858✔
1750
                // Functions are not allowed to write sensitive data,
1751
                // and initialization is guaranteed
1752
                stack.store_numbers(m_inv, addr, width);
846✔
1753
            }
1754
        }
1755
        }
1756
    }
1757

1758
    constexpr Reg r0_reg{R0_RETURN_VALUE};
13,508✔
1759
    const auto r0_pack = reg_pack(r0_reg);
13,508✔
1760
    havoc(r0_pack.stack_numeric_size);
13,508✔
1761
    if (call.is_map_lookup) {
13,508✔
1762
        // This is the only way to get a null pointer
1763
        if (maybe_fd_reg) {
2,882✔
1764
            if (const auto map_type = dom.get_map_type(*maybe_fd_reg)) {
2,882✔
1765
                if (thread_local_program_info->platform->get_map_type(*map_type).value_type == EbpfMapValueType::MAP) {
2,876✔
1766
                    if (const auto inner_map_fd = dom.get_map_inner_map_fd(*maybe_fd_reg)) {
22✔
1767
                        do_load_mapfd(r0_reg, to_signed(*inner_map_fd), true);
22✔
1768
                        goto out;
22✔
1769
                    }
1770
                } else {
1771
                    assign_valid_ptr(r0_reg, true);
2,854✔
1772
                    assign(r0_pack.shared_offset, 0);
2,854✔
1773
                    m_inv.set(r0_pack.shared_region_size, dom.get_map_value_size(*maybe_fd_reg));
2,854✔
1774
                    type_inv.assign_type(m_inv, r0_reg, T_SHARED);
4,281✔
1775
                }
1776
            }
1777
        }
1778
        assign_valid_ptr(r0_reg, true);
2,858✔
1779
        assign(r0_pack.shared_offset, 0);
2,858✔
1780
        type_inv.assign_type(m_inv, r0_reg, T_SHARED);
4,287✔
1781
    } else {
1782
        havoc(r0_pack.svalue);
10,626✔
1783
        havoc(r0_pack.uvalue);
10,626✔
1784
        havoc_offsets(r0_reg);
10,626✔
1785
        type_inv.assign_type(m_inv, r0_reg, T_NUM);
15,939✔
1786
        // assume(r0_pack.value < 0); for INTEGER_OR_NO_RETURN_IF_SUCCEED.
1787
    }
1788
out:
13,506✔
1789
    scratch_caller_saved_registers();
13,506✔
1790
    if (call.reallocate_packet) {
13,506✔
1791
        forget_packet_pointers();
38✔
1792
    }
1793
}
1794

1795
void ebpf_transformer::operator()(const CallLocal& call) {
58✔
1796
    using namespace crab::dsl_syntax;
29✔
1797
    if (m_inv.is_bottom()) {
58✔
1798
        return;
×
1799
    }
1800
    save_callee_saved_registers(call.stack_frame_prefix);
58✔
1801

1802
    // Update r10.
1803
    constexpr Reg r10_reg{R10_STACK_POINTER};
58✔
1804
    add(r10_reg, -EBPF_SUBPROGRAM_STACK_SIZE, 64);
58✔
1805
}
1806

1807
void ebpf_transformer::operator()(const Callx& callx) {
50✔
1808
    using namespace crab::dsl_syntax;
25✔
1809
    if (m_inv.is_bottom()) {
50✔
1810
        return;
4✔
1811
    }
1812

1813
    // Look up the helper function id.
1814
    const reg_pack_t& reg = reg_pack(callx.func);
50✔
1815
    const auto src_interval = m_inv.eval_interval(reg.svalue);
50✔
1816
    if (const auto sn = src_interval.singleton()) {
50✔
1817
        if (sn->fits<int32_t>()) {
48✔
1818
            // We can now process it as if the id was immediate.
1819
            const int32_t imm = sn->cast_to<int32_t>();
48✔
1820
            if (!thread_local_program_info->platform->is_helper_usable(imm)) {
48✔
1821
                return;
4✔
1822
            }
1823
            const Call call = make_call(imm, *thread_local_program_info->platform);
44✔
1824
            (*this)(call);
44✔
1825
        }
44✔
1826
    }
50✔
1827
}
50✔
1828

1829
void ebpf_transformer::do_load_mapfd(const Reg& dst_reg, const int mapfd, const bool maybe_null) {
7,790✔
1830
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
7,790✔
1831
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
7,790✔
1832
    if (type.value_type == EbpfMapValueType::PROGRAM) {
7,790✔
1833
        type_inv.assign_type(m_inv, dst_reg, T_MAP_PROGRAMS);
504✔
1834
    } else {
1835
        type_inv.assign_type(m_inv, dst_reg, T_MAP);
11,181✔
1836
    }
1837
    const reg_pack_t& dst = reg_pack(dst_reg);
7,790✔
1838
    assign(dst.map_fd, mapfd);
7,790✔
1839
    assign_valid_ptr(dst_reg, maybe_null);
7,790✔
1840
}
7,790✔
1841

1842
void ebpf_transformer::operator()(const LoadMapFd& ins) { do_load_mapfd(ins.dst, ins.mapfd, false); }
7,768✔
1843

1844
void ebpf_transformer::do_load_map_address(const Reg& dst_reg, const int mapfd, const int32_t offset) {
10✔
1845
    const EbpfMapDescriptor& desc = thread_local_program_info->platform->get_map_descriptor(mapfd);
10✔
1846
    const EbpfMapType& type = thread_local_program_info->platform->get_map_type(desc.type);
10✔
1847

1848
    if (type.value_type == EbpfMapValueType::PROGRAM) {
10✔
NEW
1849
        throw std::invalid_argument("Cannot load address of program map type - only data maps are supported");
×
1850
    }
1851

1852
    // Set the shared region size and offset for the map.
1853
    type_inv.assign_type(m_inv, dst_reg, T_SHARED);
10✔
1854
    const reg_pack_t& dst = reg_pack(dst_reg);
10✔
1855
    m_inv.assign(dst.shared_offset, offset);
10✔
1856
    m_inv.assign(dst.shared_region_size, desc.value_size);
10✔
1857
    assign_valid_ptr(dst_reg, false);
10✔
1858
}
10✔
1859

1860
void ebpf_transformer::operator()(const LoadMapAddress& ins) { do_load_map_address(ins.dst, ins.mapfd, ins.offset); }
10✔
1861

1862
void ebpf_transformer::assign_valid_ptr(const Reg& dst_reg, const bool maybe_null) {
13,512✔
1863
    using namespace crab::dsl_syntax;
6,756✔
1864
    const reg_pack_t& reg = reg_pack(dst_reg);
13,512✔
1865
    havoc(reg.svalue);
13,512✔
1866
    havoc(reg.uvalue);
13,512✔
1867
    if (maybe_null) {
13,512✔
1868
        m_inv += 0 <= reg.svalue;
11,468✔
1869
    } else {
1870
        m_inv += 0 < reg.svalue;
15,556✔
1871
    }
1872
    m_inv += reg.svalue <= PTR_MAX;
20,268✔
1873
    assign(reg.uvalue, reg.svalue);
13,512✔
1874
}
13,512✔
1875

1876
// If nothing is known of the stack_numeric_size,
1877
// try to recompute the stack_numeric_size.
1878
void ebpf_transformer::recompute_stack_numeric_size(NumAbsDomain& inv, const variable_t type_variable) const {
616,885✔
1879
    const variable_t stack_numeric_size_variable =
308,443✔
1880
        variable_t::kind_var(data_kind_t::stack_numeric_sizes, type_variable);
616,885✔
1881

1882
    if (!inv.eval_interval(stack_numeric_size_variable).is_top()) {
1,233,771✔
1883
        return;
×
1884
    }
1885

1886
    if (type_inv.has_type(inv, type_variable, T_STACK)) {
616,885✔
1887
        const int numeric_size =
308,002✔
1888
            stack.min_all_num_size(inv, variable_t::kind_var(data_kind_t::stack_offsets, type_variable));
616,003✔
1889
        if (numeric_size > 0) {
616,003✔
1890
            inv.assign(stack_numeric_size_variable, numeric_size);
18,315✔
1891
        }
1892
    }
1893
}
1894

1895
void ebpf_transformer::recompute_stack_numeric_size(NumAbsDomain& inv, const Reg& reg) const {
13,128✔
1896
    recompute_stack_numeric_size(inv, reg_pack(reg).type);
13,128✔
1897
}
13,128✔
1898

1899
void ebpf_transformer::add(const Reg& reg, const int imm, const int finite_width) {
16,732✔
1900
    const auto dst = reg_pack(reg);
16,732✔
1901
    const auto offset = dom.get_type_offset_variable(reg);
16,732✔
1902
    add_overflow(dst.svalue, dst.uvalue, imm, finite_width);
16,732✔
1903
    if (offset.has_value()) {
16,732✔
1904
        add(offset.value(), imm);
13,128✔
1905
        if (imm > 0) {
13,128✔
1906
            // Since the start offset is increasing but
1907
            // the end offset is not, the numeric size decreases.
1908
            sub(dst.stack_numeric_size, imm);
1,308✔
1909
        } else if (imm < 0) {
12,256✔
1910
            havoc(dst.stack_numeric_size);
12,256✔
1911
        }
1912
        recompute_stack_numeric_size(m_inv, reg);
13,128✔
1913
    }
1914
}
16,732✔
1915

1916
void ebpf_transformer::shl(const Reg& dst_reg, int imm, const int finite_width) {
4,144✔
1917
    const reg_pack_t dst = reg_pack(dst_reg);
4,144✔
1918

1919
    // The BPF ISA requires masking the imm.
1920
    imm &= finite_width - 1;
4,144✔
1921

1922
    if (m_inv.entail(type_is_number(dst))) {
6,216✔
1923
        const auto interval = m_inv.eval_interval(dst.uvalue);
4,142✔
1924
        if (interval.finite_size()) {
4,142✔
1925
            const number_t lb = interval.lb().number().value();
5,504✔
1926
            const number_t ub = interval.ub().number().value();
5,504✔
1927
            uint64_t lb_n = lb.cast_to<uint64_t>();
2,752✔
1928
            uint64_t ub_n = ub.cast_to<uint64_t>();
2,752✔
1929
            const uint64_t uint_max = finite_width == 64 ? uint64_t{std::numeric_limits<uint64_t>::max()}
2,938✔
1930
                                                         : uint64_t{std::numeric_limits<uint32_t>::max()};
186✔
1931
            if (lb_n >> (finite_width - imm) != ub_n >> (finite_width - imm)) {
2,752✔
1932
                // The bits that will be shifted out to the left are different,
1933
                // which means all combinations of remaining bits are possible.
1934
                lb_n = 0;
134✔
1935
                ub_n = uint_max << imm & uint_max;
134✔
1936
            } else {
1937
                // The bits that will be shifted out to the left are identical
1938
                // for all values in the interval, so we can safely shift left
1939
                // to get a new interval.
1940
                lb_n = lb_n << imm & uint_max;
2,618✔
1941
                ub_n = ub_n << imm & uint_max;
2,618✔
1942
            }
1943
            m_inv.set(dst.uvalue, interval_t{lb_n, ub_n});
2,752✔
1944
            m_inv.assign(dst.svalue, dst.uvalue);
2,752✔
1945
            overflow_signed(m_inv, dst.svalue, finite_width);
2,752✔
1946
            overflow_unsigned(m_inv, dst.uvalue, finite_width);
2,752✔
1947
            return;
2,752✔
1948
        }
2,752✔
1949
    }
4,142✔
1950
    shl_overflow(dst.svalue, dst.uvalue, imm);
1,392✔
1951
    havoc_offsets(dst_reg);
1,392✔
1952
}
1953

1954
void ebpf_transformer::lshr(const Reg& dst_reg, int imm, int finite_width) {
3,248✔
1955
    reg_pack_t dst = reg_pack(dst_reg);
3,248✔
1956

1957
    // The BPF ISA requires masking the imm.
1958
    imm &= finite_width - 1;
3,248✔
1959

1960
    if (m_inv.entail(type_is_number(dst))) {
4,872✔
1961
        auto interval = m_inv.eval_interval(dst.uvalue);
3,246✔
1962
        number_t lb_n{0};
3,246✔
1963
        number_t ub_n{std::numeric_limits<uint64_t>::max() >> imm};
3,246✔
1964
        if (interval.finite_size()) {
3,246✔
1965
            number_t lb = interval.lb().number().value();
3,172✔
1966
            number_t ub = interval.ub().number().value();
3,172✔
1967
            if (finite_width == 64) {
1,586✔
1968
                lb_n = lb.cast_to<uint64_t>() >> imm;
1,154✔
1969
                ub_n = ub.cast_to<uint64_t>() >> imm;
1,154✔
1970
            } else {
1971
                number_t lb_w = lb.cast_to_sint(finite_width);
432✔
1972
                number_t ub_w = ub.cast_to_sint(finite_width);
432✔
1973
                lb_n = lb_w.cast_to<uint32_t>() >> imm;
432✔
1974
                ub_n = ub_w.cast_to<uint32_t>() >> imm;
432✔
1975

1976
                // The interval must be valid since a signed range crossing 0
1977
                // was earlier converted to a full unsigned range.
1978
                assert(lb_n <= ub_n);
432✔
1979
            }
432✔
1980
        }
1,586✔
1981
        m_inv.set(dst.uvalue, interval_t{lb_n, ub_n});
8,115✔
1982
        m_inv.assign(dst.svalue, dst.uvalue);
3,246✔
1983
        overflow_signed(m_inv, dst.svalue, finite_width);
3,246✔
1984
        overflow_unsigned(m_inv, dst.uvalue, finite_width);
3,246✔
1985
        return;
3,246✔
1986
    }
4,869✔
1987
    havoc(dst.svalue);
2✔
1988
    havoc(dst.uvalue);
2✔
1989
    havoc_offsets(dst_reg);
2✔
1990
}
1991

1992
static int _movsx_bits(const Bin::Op op) {
4,136✔
1993
    switch (op) {
4,136✔
1994
    case Bin::Op::MOVSX8: return 8;
8✔
1995
    case Bin::Op::MOVSX16: return 16;
8✔
1996
    case Bin::Op::MOVSX32: return 32;
2,052✔
1997
    default: throw std::exception();
×
1998
    }
1999
}
2000

2001
void ebpf_transformer::sign_extend(const Reg& dst_reg, const linear_expression_t& right_svalue, const int finite_width,
2,086✔
2002
                                   const Bin::Op op) {
2003
    using namespace crab;
1,043✔
2004

2005
    const int bits = _movsx_bits(op);
2,086✔
2006
    const reg_pack_t dst = reg_pack(dst_reg);
2,086✔
2007
    interval_t right_interval = m_inv.eval_interval(right_svalue);
2,086✔
2008
    type_inv.assign_type(m_inv, dst_reg, T_NUM);
2,086✔
2009
    havoc_offsets(dst_reg);
2,086✔
2010
    const int64_t span = 1ULL << bits;
2,086✔
2011
    if (right_interval.ub() - right_interval.lb() >= span) {
4,172✔
2012
        // Interval covers the full space.
2013
        if (bits == 64) {
2,024✔
2014
            havoc(dst.svalue);
×
2015
            return;
×
2016
        }
2017
        right_interval = interval_t::signed_int(bits);
3,036✔
2018
    }
2019
    const int64_t mask = 1ULL << (bits - 1);
2,086✔
2020

2021
    // Sign extend each bound.
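    // Editorial illustration (not part of the original source): with bits == 8, span == 0x100 and
    // mask == 0x80, a bound of 0xF0 becomes ((0xF0 & 0xFF) ^ 0x80) - 0x80 = 0x70 - 0x80 = -16,
    // which is exactly 0xF0 reinterpreted as a signed 8-bit value.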
2022
    int64_t lb = right_interval.lb().number().value().cast_to<int64_t>();
4,172✔
2023
    lb &= span - 1;
2,086✔
2024
    lb = (lb ^ mask) - mask;
2,086✔
2025
    int64_t ub = right_interval.ub().number().value().cast_to<int64_t>();
4,172✔
2026
    ub &= span - 1;
2,086✔
2027
    ub = (ub ^ mask) - mask;
2,086✔
2028
    m_inv.set(dst.svalue, interval_t{lb, ub});
2,086✔
2029

2030
    if (finite_width) {
2,086✔
2031
        m_inv.assign(dst.uvalue, dst.svalue);
2,086✔
2032
        overflow_signed(m_inv, dst.svalue, finite_width);
2,086✔
2033
        overflow_unsigned(m_inv, dst.uvalue, finite_width);
2,086✔
2034
    }
2035
}
2,086✔
2036

2037
void ebpf_transformer::ashr(const Reg& dst_reg, const linear_expression_t& right_svalue, int finite_width) {
606✔
2038
    using namespace crab;
303✔
2039

2040
    reg_pack_t dst = reg_pack(dst_reg);
606✔
2041
    if (m_inv.entail(type_is_number(dst))) {
909✔
2042
        interval_t left_interval = interval_t::bottom();
596✔
2043
        interval_t right_interval = interval_t::bottom();
596✔
2044
        interval_t left_interval_positive = interval_t::bottom();
596✔
2045
        interval_t left_interval_negative = interval_t::bottom();
596✔
2046
        get_signed_intervals(m_inv, finite_width == 64, dst.svalue, dst.uvalue, right_svalue, left_interval,
596✔
2047
                             right_interval, left_interval_positive, left_interval_negative);
2048
        if (auto sn = right_interval.singleton()) {
596✔
2049
            // The BPF ISA requires masking the imm.
2050
            int64_t imm = sn->cast_to<int64_t>() & (finite_width - 1);
594✔
2051

2052
            int64_t lb_n = std::numeric_limits<int64_t>::min() >> imm;
594✔
2053
            int64_t ub_n = std::numeric_limits<int64_t>::max() >> imm;
594✔
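            // Editorial note (not in the original source): without further information, an
            // arithmetic shift by imm yields a value in [INT64_MIN >> imm, INT64_MAX >> imm].
            // A finite input interval is tightened below by shifting each endpoint, e.g.
            // [-8, 8] >> 1 gives [-4, 4], since shifting with sign replication is monotone.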
2054
            if (left_interval.finite_size()) {
594✔
2055
                const auto [lb, ub] = left_interval.pair_number();
594✔
2056
                if (finite_width == 64) {
594✔
2057
                    lb_n = lb.cast_to<int64_t>() >> imm;
546✔
2058
                    ub_n = ub.cast_to<int64_t>() >> imm;
546✔
2059
                } else {
2060
                    number_t lb_w = lb.cast_to_sint(finite_width) >> gsl::narrow<int>(imm);
72✔
2061
                    number_t ub_w = ub.cast_to_sint(finite_width) >> gsl::narrow<int>(imm);
72✔
2062
                    if (lb_w.cast_to<uint32_t>() <= ub_w.cast_to<uint32_t>()) {
48✔
2063
                        lb_n = lb_w.cast_to<uint32_t>();
28✔
2064
                        ub_n = ub_w.cast_to<uint32_t>();
28✔
2065
                    }
2066
                }
48✔
2067
            }
594✔
2068
            m_inv.set(dst.svalue, interval_t{lb_n, ub_n});
594✔
2069
            m_inv.assign(dst.uvalue, dst.svalue);
594✔
2070
            overflow_signed(m_inv, dst.svalue, finite_width);
594✔
2071
            overflow_unsigned(m_inv, dst.uvalue, finite_width);
594✔
2072
            return;
594✔
2073
        }
596✔
2074
    }
2,381✔
2075
    havoc(dst.svalue);
12✔
2076
    havoc(dst.uvalue);
12✔
2077
    havoc_offsets(dst_reg);
12✔
2078
}
2079

2080
static void apply(NumAbsDomain& inv, const binop_t& op, const variable_t x, const variable_t y, const variable_t z) {
542✔
2081
    inv.apply(op, x, y, z, 0);
542✔
2082
}
271✔
2083

2084
void ebpf_transformer::operator()(const Bin& bin) {
109,324✔
2085
    using namespace crab::dsl_syntax;
54,662✔
2086

2087
    auto dst = reg_pack(bin.dst);
109,324✔
2088
    int finite_width = bin.is64 ? 64 : 32;
109,324✔
2089

2090
    if (auto pimm = std::get_if<Imm>(&bin.v)) {
109,324✔
2091
        // dst += K
2092
        int64_t imm;
32,121✔
2093
        if (bin.is64) {
64,242✔
2094
            // Use the full signed value.
2095
            imm = to_signed(pimm->v);
51,698✔
2096
        } else {
2097
            // Use only the low 32 bits of the value.
2098
            imm = gsl::narrow_cast<int32_t>(pimm->v);
12,544✔
2099
            bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
12,544✔
2100
            // If this is a 32-bit operation and the destination is not a number, forget everything about the register.
2101
            if (!type_inv.has_type(m_inv, bin.dst, T_NUM)) {
12,544✔
2102
                havoc_register(m_inv, bin.dst);
500✔
2103
                havoc_offsets(bin.dst);
500✔
2104
                havoc(dst.type);
500✔
2105
            }
2106
        }
2107
        switch (bin.op) {
64,242✔
2108
        case Bin::Op::MOV:
31,964✔
2109
            assign(dst.svalue, imm);
31,964✔
2110
            assign(dst.uvalue, imm);
31,964✔
2111
            overflow_unsigned(m_inv, dst.uvalue, bin.is64 ? 64 : 32);
34,921✔
2112
            type_inv.assign_type(m_inv, bin.dst, T_NUM);
31,964✔
2113
            havoc_offsets(bin.dst);
31,964✔
2114
            break;
15,982✔
2115
        case Bin::Op::MOVSX8:
×
2116
        case Bin::Op::MOVSX16:
2117
        case Bin::Op::MOVSX32: CRAB_ERROR("Unsupported operation");
×
2118
        case Bin::Op::ADD:
16,594✔
2119
            if (imm == 0) {
16,594✔
2120
                return;
130✔
2121
            }
2122
            add(bin.dst, gsl::narrow<int>(imm), finite_width);
16,590✔
2123
            break;
8,295✔
2124
        case Bin::Op::SUB:
28✔
2125
            if (imm == 0) {
28✔
2126
                return;
1✔
2127
            }
2128
            add(bin.dst, gsl::narrow<int>(-imm), finite_width);
26✔
2129
            break;
13✔
2130
        case Bin::Op::MUL:
90✔
2131
            mul(dst.svalue, dst.uvalue, imm, finite_width);
90✔
2132
            havoc_offsets(bin.dst);
90✔
2133
            break;
45✔
2134
        case Bin::Op::UDIV:
1,130✔
2135
            udiv(dst.svalue, dst.uvalue, imm, finite_width);
1,130✔
2136
            havoc_offsets(bin.dst);
1,130✔
2137
            break;
565✔
2138
        case Bin::Op::UMOD:
24✔
2139
            urem(dst.svalue, dst.uvalue, imm, finite_width);
24✔
2140
            havoc_offsets(bin.dst);
24✔
2141
            break;
12✔
2142
        case Bin::Op::SDIV:
18✔
2143
            sdiv(dst.svalue, dst.uvalue, imm, finite_width);
18✔
2144
            havoc_offsets(bin.dst);
18✔
2145
            break;
9✔
2146
        case Bin::Op::SMOD:
34✔
2147
            srem(dst.svalue, dst.uvalue, imm, finite_width);
34✔
2148
            havoc_offsets(bin.dst);
34✔
2149
            break;
17✔
2150
        case Bin::Op::OR:
1,542✔
2151
            bitwise_or(dst.svalue, dst.uvalue, imm);
1,542✔
2152
            havoc_offsets(bin.dst);
1,542✔
2153
            break;
771✔
2154
        case Bin::Op::AND:
4,710✔
2155
            // FIX: what to do with ptr&-8 as in counter/simple_loop_unrolled?
2156
            bitwise_and(dst.svalue, dst.uvalue, imm);
4,710✔
2157
            if (gsl::narrow<int32_t>(imm) > 0) {
4,710✔
2158
                // AND with immediate is only a 32-bit operation so svalue and uvalue are the same.
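                // Editorial illustration (not part of the original source): for "dst &= 0xff" the
                // constraints below record 0 <= dst <= 255 in both the signed and unsigned views.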
2159
                assume(dst.svalue <= imm);
6,369✔
2160
                assume(dst.uvalue <= imm);
6,369✔
2161
                assume(0 <= dst.svalue);
8,492✔
2162
                assume(0 <= dst.uvalue);
8,492✔
2163
            }
2164
            havoc_offsets(bin.dst);
4,710✔
2165
            break;
2,355✔
2166
        case Bin::Op::LSH: shl(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
4,098✔
2167
        case Bin::Op::RSH: lshr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
3,214✔
2168
        case Bin::Op::ARSH: ashr(bin.dst, gsl::narrow<int32_t>(imm), finite_width); break;
572✔
2169
        case Bin::Op::XOR:
224✔
2170
            bitwise_xor(dst.svalue, dst.uvalue, imm);
224✔
2171
            havoc_offsets(bin.dst);
224✔
2172
            break;
112✔
2173
        }
2174
    } else {
2175
        // dst op= src
2176
        auto src_reg = std::get<Reg>(bin.v);
45,082✔
2177
        auto src = reg_pack(src_reg);
45,082✔
2178
        switch (bin.op) {
45,082✔
2179
        case Bin::Op::ADD: {
2,726✔
2180
            if (type_inv.same_type(m_inv, bin.dst, std::get<Reg>(bin.v))) {
2,726✔
2181
                // both must be numbers
2182
                add_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
2,180✔
2183
            } else {
2184
                // Here we're not sure that lhs and rhs are the same type; they might be.
2185
                // But the earlier assertions should already have failed unless exactly one of lhs and rhs is a pointer.
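                // Editorial illustration (not part of the original source): if r1 is a stack
                // pointer at offset 512 and r2 holds the number 8, "r1 += r2" keeps r1 a stack
                // pointer and moves its offset to 520, while "r2 += r1" retypes r2 as a stack
                // pointer with the same resulting offset; both cases are handled in the join below.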
2186
                m_inv =
546✔
2187
                    type_inv.join_over_types(m_inv, bin.dst, [&](NumAbsDomain& inv, const type_encoding_t dst_type) {
1,092✔
2188
                        inv = type_inv.join_over_types(
1,100✔
2189
                            inv, src_reg, [&](NumAbsDomain& inv, const type_encoding_t src_type) {
550✔
2190
                                if (dst_type == T_NUM && src_type != T_NUM) {
556✔
2191
                                    // num += ptr
2192
                                    type_inv.assign_type(inv, bin.dst, src_type);
12✔
2193
                                    if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, src_type)) {
12✔
2194
                                        crab::apply(inv, arith_binop_t::ADD, dst_offset.value(), dst.svalue,
20✔
2195
                                                    dom.get_type_offset_variable(src_reg, src_type).value());
20✔
2196
                                    }
2197
                                    if (src_type == T_SHARED) {
12✔
2198
                                        inv.assign(dst.shared_region_size, src.shared_region_size);
×
2199
                                    }
2200
                                } else if (dst_type != T_NUM && src_type == T_NUM) {
550✔
2201
                                    // ptr += num
2202
                                    type_inv.assign_type(inv, bin.dst, dst_type);
532✔
2203
                                    if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, dst_type)) {
532✔
2204
                                        crab::apply(inv, arith_binop_t::ADD, dst_offset.value(), dst_offset.value(),
798✔
2205
                                                    src.svalue);
532✔
2206
                                        if (dst_type == T_STACK) {
532✔
2207
                                            // Reduce the numeric size.
2208
                                            using namespace crab::dsl_syntax;
8✔
2209
                                            if (m_inv.intersect(src.svalue < 0)) {
24✔
2210
                                                inv -= dst.stack_numeric_size;
2✔
2211
                                                recompute_stack_numeric_size(inv, dst.type);
2✔
2212
                                            } else {
2213
                                                apply_signed(inv, arith_binop_t::SUB, dst.stack_numeric_size,
14✔
2214
                                                             dst.stack_numeric_size, dst.stack_numeric_size, src.svalue,
14✔
2215
                                                             0);
2216
                                            }
2217
                                        }
2218
                                    }
2219
                                } else if (dst_type == T_NUM && src_type == T_NUM) {
544✔
2220
                                    // dst and src don't necessarily have the same type, but among the possibilities
2221
                                    // enumerated is the case where they are both numbers.
2222
                                    apply_signed(inv, arith_binop_t::ADD, dst.svalue, dst.uvalue, dst.svalue,
2✔
2223
                                                 src.svalue, finite_width);
2✔
2224
                                } else {
2225
                                    // We ignore the cases here that do not match the assumption described
2226
                                    // above.  Joining bottom with another results will leave the other
2227
                                    // results unchanged.
2228
                                    inv.set_to_bottom();
10✔
2229
                                }
2230
                            });
1,106✔
2231
                    });
1,096✔
2232
                // careful: change dst.value only after dealing with offset
2233
                apply_signed(m_inv, arith_binop_t::ADD, dst.svalue, dst.uvalue, dst.svalue, src.svalue, finite_width);
546✔
2234
            }
2235
            break;
1,363✔
2236
        }
2237
        case Bin::Op::SUB: {
480✔
2238
            if (type_inv.same_type(m_inv, bin.dst, std::get<Reg>(bin.v))) {
480✔
2239
                // src and dest have the same type.
2240
                m_inv = type_inv.join_over_types(m_inv, bin.dst, [&](NumAbsDomain& inv, const type_encoding_t type) {
940✔
2241
                    switch (type) {
474✔
2242
                    case T_NUM:
264✔
2243
                        // This is: sub_overflow(inv, dst.value, src.value, finite_width);
2244
                        apply_signed(inv, arith_binop_t::SUB, dst.svalue, dst.uvalue, dst.svalue, src.svalue,
264✔
2245
                                     finite_width);
2246
                        type_inv.assign_type(inv, bin.dst, T_NUM);
264✔
2247
                        crab::havoc_offsets(inv, bin.dst);
264✔
2248
                        break;
264✔
2249
                    default:
210✔
2250
                        // ptr -= ptr
2251
                        // Assertions should make sure we only perform this on non-shared pointers.
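                        // Editorial illustration (not part of the original source): subtracting two
                        // stack pointers at offsets 512 and 480 leaves the number 32 in dst; the
                        // pointer offset is then forgotten since the result is numeric.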
2252
                        if (const auto dst_offset = dom.get_type_offset_variable(bin.dst, type)) {
210✔
2253
                            apply_signed(inv, arith_binop_t::SUB, dst.svalue, dst.uvalue, dst_offset.value(),
210✔
2254
                                         dom.get_type_offset_variable(src_reg, type).value(), finite_width);
315✔
2255
                            inv -= dst_offset.value();
210✔
2256
                        }
2257
                        crab::havoc_offsets(inv, bin.dst);
210✔
2258
                        type_inv.assign_type(inv, bin.dst, T_NUM);
210✔
2259
                        break;
210✔
2260
                    }
2261
                });
944✔
2262
            } else {
2263
                // We're not sure that lhs and rhs are the same type.
2264
                // Either they're different, or at least one is not a singleton.
2265
                if (type_inv.get_type(m_inv, std::get<Reg>(bin.v)) != T_NUM) {
10✔
2266
                    type_inv.havoc_type(m_inv, bin.dst);
6✔
2267
                    havoc(dst.svalue);
6✔
2268
                    havoc(dst.uvalue);
6✔
2269
                    havoc_offsets(bin.dst);
6✔
2270
                } else {
2271
                    sub_overflow(dst.svalue, dst.uvalue, src.svalue, finite_width);
4✔
2272
                    if (auto dst_offset = dom.get_type_offset_variable(bin.dst)) {
4✔
2273
                        sub(dst_offset.value(), src.svalue);
4✔
2274
                        if (type_inv.has_type(m_inv, dst.type, T_STACK)) {
4✔
2275
                            // Reduce the numeric size.
2276
                            using namespace crab::dsl_syntax;
2✔
2277
                            if (m_inv.intersect(src.svalue > 0)) {
8✔
2278
                                m_inv -= dst.stack_numeric_size;
4✔
2279
                                recompute_stack_numeric_size(m_inv, dst.type);
4✔
2280
                            } else {
2281
                                crab::apply(m_inv, arith_binop_t::ADD, dst.stack_numeric_size, dst.stack_numeric_size,
×
2282
                                            src.svalue);
2283
                            }
2284
                        }
2285
                    }
2286
                }
2287
            }
2288
            break;
240✔
2289
        }
2290
        case Bin::Op::MUL:
74✔
2291
            mul(dst.svalue, dst.uvalue, src.svalue, finite_width);
74✔
2292
            havoc_offsets(bin.dst);
74✔
2293
            break;
37✔
2294
        case Bin::Op::UDIV:
108✔
2295
            udiv(dst.svalue, dst.uvalue, src.uvalue, finite_width);
108✔
2296
            havoc_offsets(bin.dst);
108✔
2297
            break;
54✔
2298
        case Bin::Op::UMOD:
36✔
2299
            urem(dst.svalue, dst.uvalue, src.uvalue, finite_width);
36✔
2300
            havoc_offsets(bin.dst);
36✔
2301
            break;
18✔
2302
        case Bin::Op::SDIV:
36✔
2303
            sdiv(dst.svalue, dst.uvalue, src.svalue, finite_width);
36✔
2304
            havoc_offsets(bin.dst);
36✔
2305
            break;
18✔
2306
        case Bin::Op::SMOD:
48✔
2307
            srem(dst.svalue, dst.uvalue, src.svalue, finite_width);
48✔
2308
            havoc_offsets(bin.dst);
48✔
2309
            break;
24✔
2310
        case Bin::Op::OR:
4,440✔
2311
            bitwise_or(dst.svalue, dst.uvalue, src.uvalue, finite_width);
4,440✔
2312
            havoc_offsets(bin.dst);
4,440✔
2313
            break;
2,220✔
2314
        case Bin::Op::AND:
468✔
2315
            bitwise_and(dst.svalue, dst.uvalue, src.uvalue, finite_width);
468✔
2316
            havoc_offsets(bin.dst);
468✔
2317
            break;
234✔
2318
        case Bin::Op::LSH:
592✔
2319
            if (m_inv.entail(type_is_number(src_reg))) {
888✔
2320
                auto src_interval = m_inv.eval_interval(src.uvalue);
588✔
2321
                if (std::optional<number_t> sn = src_interval.singleton()) {
588✔
2322
                    // truncate to uint64?
2323
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
46✔
2324
                    if (imm <= std::numeric_limits<int32_t>::max()) {
46✔
2325
                        if (!bin.is64) {
46✔
2326
                            // Use only the low 32 bits of the value.
2327
                            bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
39✔
2328
                        }
2329
                        shl(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
46✔
2330
                        break;
46✔
2331
                    }
2332
                }
588✔
2333
            }
588✔
2334
            shl_overflow(dst.svalue, dst.uvalue, src.uvalue);
546✔
2335
            havoc_offsets(bin.dst);
546✔
2336
            break;
273✔
2337
        case Bin::Op::RSH:
80✔
2338
            if (m_inv.entail(type_is_number(src_reg))) {
120✔
2339
                auto src_interval = m_inv.eval_interval(src.uvalue);
78✔
2340
                if (std::optional<number_t> sn = src_interval.singleton()) {
78✔
2341
                    uint64_t imm = sn->cast_to<uint64_t>() & (bin.is64 ? 63 : 31);
34✔
2342
                    if (imm <= std::numeric_limits<int32_t>::max()) {
34✔
2343
                        if (!bin.is64) {
34✔
2344
                            // Use only the low 32 bits of the value.
2345
                            bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
24✔
2346
                        }
2347
                        lshr(bin.dst, gsl::narrow_cast<int32_t>(imm), finite_width);
34✔
2348
                        break;
34✔
2349
                    }
2350
                }
78✔
2351
            }
78✔
2352
            havoc(dst.svalue);
46✔
2353
            havoc(dst.uvalue);
46✔
2354
            havoc_offsets(bin.dst);
46✔
2355
            break;
23✔
2356
        case Bin::Op::ARSH:
36✔
2357
            if (m_inv.entail(type_is_number(src_reg))) {
54✔
2358
                ashr(bin.dst, src.svalue, finite_width);
34✔
2359
                break;
34✔
2360
            }
2361
            havoc(dst.svalue);
2✔
2362
            havoc(dst.uvalue);
2✔
2363
            havoc_offsets(bin.dst);
2✔
2364
            break;
1✔
2365
        case Bin::Op::XOR:
248✔
2366
            bitwise_xor(dst.svalue, dst.uvalue, src.uvalue, finite_width);
248✔
2367
            havoc_offsets(bin.dst);
248✔
2368
            break;
124✔
2369
        case Bin::Op::MOVSX8:
2,098✔
2370
        case Bin::Op::MOVSX16:
1,049✔
2371
        case Bin::Op::MOVSX32:
1,049✔
2372
            // Keep relational information if operation is a no-op.
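            // Editorial note (not in the original source): e.g. a MOVSX32 of a register onto itself
            // whose value already fits in signed 32 bits changes nothing, so relational facts such
            // as equalities with other registers are preserved instead of being recomputed.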
2373
            if (dst.svalue == src.svalue &&
3,123✔
2374
                m_inv.eval_interval(dst.svalue) <= interval_t::signed_int(_movsx_bits(bin.op))) {
5,173✔
2375
                return;
124✔
2376
            }
2377
            if (m_inv.entail(type_is_number(src_reg))) {
3,141✔
2378
                sign_extend(bin.dst, src.svalue, finite_width, bin.op);
2,086✔
2379
                break;
2,086✔
2380
            }
2381
            havoc(dst.svalue);
8✔
2382
            havoc(dst.uvalue);
8✔
2383
            havoc_offsets(bin.dst);
22,483✔
2384
            break;
4✔
2385
        case Bin::Op::MOV:
33,612✔
2386
            // Keep relational information if operation is a no-op.
2387
            if (dst.svalue == src.svalue &&
34,246✔
2388
                m_inv.eval_interval(dst.uvalue) <= interval_t::unsigned_int(bin.is64 ? 64 : 32)) {
36,141✔
2389
                return;
60✔
2390
            }
2391
            assign(dst.svalue, src.svalue);
33,492✔
2392
            assign(dst.uvalue, src.uvalue);
33,492✔
2393
            havoc_offsets(bin.dst);
33,492✔
2394
            m_inv = type_inv.join_over_types(m_inv, src_reg, [&](NumAbsDomain& inv, const type_encoding_t type) {
66,984✔
2395
                switch (type) {
33,510✔
2396
                case T_CTX:
4,540✔
2397
                    if (bin.is64) {
4,540✔
2398
                        inv.assign(dst.type, type);
4,536✔
2399
                        inv.assign(dst.ctx_offset, src.ctx_offset);
6,804✔
2400
                    }
2401
                    break;
2,270✔
2402
                case T_MAP:
202✔
2403
                case T_MAP_PROGRAMS:
101✔
2404
                    if (bin.is64) {
202✔
2405
                        inv.assign(dst.type, type);
198✔
2406
                        inv.assign(dst.map_fd, src.map_fd);
297✔
2407
                    }
2408
                    break;
101✔
2409
                case T_PACKET:
872✔
2410
                    if (bin.is64) {
872✔
2411
                        inv.assign(dst.type, type);
870✔
2412
                        inv.assign(dst.packet_offset, src.packet_offset);
1,305✔
2413
                    }
2414
                    break;
436✔
2415
                case T_SHARED:
776✔
2416
                    if (bin.is64) {
776✔
2417
                        inv.assign(dst.type, type);
774✔
2418
                        inv.assign(dst.shared_region_size, src.shared_region_size);
774✔
2419
                        inv.assign(dst.shared_offset, src.shared_offset);
1,161✔
2420
                    }
2421
                    break;
388✔
2422
                case T_STACK:
12,992✔
2423
                    if (bin.is64) {
12,992✔
2424
                        inv.assign(dst.type, type);
12,990✔
2425
                        inv.assign(dst.stack_offset, src.stack_offset);
12,990✔
2426
                        inv.assign(dst.stack_numeric_size, src.stack_numeric_size);
19,485✔
2427
                    }
2428
                    break;
6,496✔
2429
                default: inv.assign(dst.type, type); break;
21,192✔
2430
                }
2431
            });
33,492✔
2432
            if (bin.is64) {
33,492✔
2433
                // Add dst.type=src.type invariant.
2434
                if (bin.dst.v != std::get<Reg>(bin.v).v || type_inv.get_type(m_inv, dst.type) == T_UNINIT) {
27,261✔
2435
                    // Only forget the destination type if we're copying from a different register,
2436
                    // or from the same uninitialized register.
2437
                    havoc(dst.type);
27,258✔
2438
                }
2439
                type_inv.assign_type(m_inv, bin.dst, std::get<Reg>(bin.v));
27,260✔
2440
            }
2441
            break;
16,746✔
2442
        }
2443
    }
2444
    if (!bin.is64) {
109,194✔
2445
        bitwise_and(dst.svalue, dst.uvalue, std::numeric_limits<uint32_t>::max());
31,599✔
2446
    }
2447
}
2448

2449
void ebpf_transformer::initialize_loop_counter(const label_t& label) {
32✔
2450
    m_inv.assign(variable_t::loop_counter(to_string(label)), 0);
48✔
2451
}
32✔
2452

2453
void ebpf_transformer::operator()(const IncrementLoopCounter& ins) {
164✔
2454
    const auto counter = variable_t::loop_counter(to_string(ins.name));
164✔
2455
    this->add(counter, 1);
164✔
2456
}
164✔
2457

2458
void ebpf_domain_initialize_loop_counter(ebpf_domain_t& dom, const label_t& label) {
32✔
2459
    ebpf_transformer{dom}.initialize_loop_counter(label);
32✔
2460
}
32✔
2461

2462
} // namespace crab