Alan-Jowett / ebpf-verifier, build 15194704016 (github, push)

22 May 2025 08:53AM UTC coverage: 88.11% (-0.07%) from 88.177%

Commit (elazarg): uniform class names and explicit constructors for adapt_sgraph.hpp
Signed-off-by: Elazar Gershuni <elazarg@gmail.com>

27 of 30 new or added lines in 1 file covered (90.0%).
481 existing lines in 33 files now uncovered.
8552 of 9706 relevant lines covered (88.11%).
9089054.61 hits per line

Source File
/src/test/test_marshal.cpp (99.78% covered)
1
// Copyright (c) Prevail Verifier contributors.
2
// SPDX-License-Identifier: MIT
3
#include <catch2/catch_all.hpp>
4

5
#include "asm_marshal.hpp"
6
#include "asm_unmarshal.hpp"
7

8
using namespace prevail;
9

10
// Below we define a table of instruction templates that specify
11
// what values each field is allowed to contain.  We first define
12
// a set of sentinel values that mean certain types of wildcards.
13
// For example, MEM_OFFSET and JMP_OFFSET are different wildcards
14
// for the 'offset' field of an instruction.  Any non-sentinel values
15
// in an instruction template are treated as literals.
16

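// For example, the table entry {{0x15, DST, 0, JMP_OFFSET, IMM}, base64} below can
// be read as "any 64-bit 'jeq dst, imm, +offset' instruction": 'dst' may name any
// destination register, 'offset' any valid jump offset and 'imm' any immediate,
// while the 'src' field must be exactly 0.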
17
constexpr int MEM_OFFSET = 3;                           // Any valid memory offset value.
18
constexpr int JMP_OFFSET = 5;                           // Any valid jump offset value.
19
constexpr int DST = 7;                                  // Any destination register number.
20
constexpr int HELPER_ID = 8;                            // Any helper ID.
21
constexpr int SRC = 9;                                  // Any source register number.
22
constexpr int IMM = -1;                                 // Any imm value.
23
constexpr int INVALID_REGISTER = R10_STACK_POINTER + 1; // Not a valid register.
24

25
struct EbpfInstructionTemplate {
26
    EbpfInst inst;
27
    bpf_conformance_groups_t groups;
28
};
29

30
// The following table is derived from the table in the Appendix of the
31
// BPF ISA specification (https://datatracker.ietf.org/doc/draft-ietf-bpf-isa/).
32
static const EbpfInstructionTemplate instruction_template[] = {
33
    // {opcode, dst, src, offset, imm}, group
34
    {{0x04, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
35
    {{0x05, 0, 0, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
36
    {{0x06, 0, 0, 0, JMP_OFFSET}, bpf_conformance_groups_t::base32},
37
    {{0x07, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
38
    {{0x0c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
39
    {{0x0f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
40
    {{0x14, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
41
    {{0x15, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
42
    {{0x16, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
43
    {{0x17, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
44
    {{0x18, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
45
    {{0x18, DST, 1, 0, IMM}, bpf_conformance_groups_t::base64},
46
    {{0x18, DST, 2, 0, IMM}, bpf_conformance_groups_t::base64},
47
    // TODO(issue #533): add support for LDDW with src_reg > 2.
48
    // {{0x18, DST, 3, 0, IMM}, bpf_conformance_groups_t::base64},
49
    // {{0x18, DST, 4, 0, IMM}, bpf_conformance_groups_t::base64},
50
    // {{0x18, DST, 5, 0, IMM}, bpf_conformance_groups_t::base64},
51
    // {{0x18, DST, 6, 0, IMM}, bpf_conformance_groups_t::base64},
52
    {{0x1c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
53
    {{0x1d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
54
    {{0x1e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
55
    {{0x1f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
56
    {{0x20, 0, 0, 0, IMM}, bpf_conformance_groups_t::packet},
57
    {{0x24, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul32},
58
    {{0x25, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
59
    {{0x26, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
60
    {{0x27, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul64},
61
    {{0x28, 0, 0, 0, IMM}, bpf_conformance_groups_t::packet},
62
    {{0x2c, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul32},
63
    {{0x2d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
64
    {{0x2e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
65
    {{0x2f, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul64},
66
    {{0x30, 0, 0, 0, IMM}, bpf_conformance_groups_t::packet},
67
    {{0x34, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul32},
68
    {{0x34, DST, 0, 1, IMM}, bpf_conformance_groups_t::divmul32},
69
    {{0x35, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
70
    {{0x36, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
71
    {{0x37, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul64},
72
    {{0x37, DST, 0, 1, IMM}, bpf_conformance_groups_t::divmul64},
73
    {{0x3c, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul32},
74
    {{0x3c, DST, SRC, 1, 0}, bpf_conformance_groups_t::divmul32},
75
    {{0x3d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
76
    {{0x3e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
77
    {{0x3f, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul64},
78
    {{0x3f, DST, SRC, 1, 0}, bpf_conformance_groups_t::divmul64},
79
    {{0x40, 0, SRC, 0, IMM}, bpf_conformance_groups_t::packet},
80
    {{0x44, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
81
    {{0x45, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
82
    {{0x46, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
83
    {{0x47, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
84
    {{0x48, 0, SRC, 0, IMM}, bpf_conformance_groups_t::packet},
85
    {{0x4c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
86
    {{0x4d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
87
    {{0x4e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
88
    {{0x4f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
89
    {{0x50, 0, SRC, 0, IMM}, bpf_conformance_groups_t::packet},
90
    {{0x54, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
91
    {{0x55, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
92
    {{0x56, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
93
    {{0x57, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
94
    {{0x5c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
95
    {{0x5d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
96
    {{0x5e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
97
    {{0x5f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
98
    {{0x61, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
99
    {{0x62, DST, 0, MEM_OFFSET, IMM}, bpf_conformance_groups_t::base32},
100
    {{0x63, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
101
    {{0x64, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
102
    {{0x65, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
103
    {{0x66, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
104
    {{0x67, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
105
    {{0x69, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
106
    {{0x6a, DST, 0, MEM_OFFSET, IMM}, bpf_conformance_groups_t::base32},
107
    {{0x6b, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
108
    {{0x6c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
109
    {{0x6d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
110
    {{0x6e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
111
    {{0x6f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
112
    {{0x71, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
113
    {{0x72, DST, 0, MEM_OFFSET, IMM}, bpf_conformance_groups_t::base32},
114
    {{0x73, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base32},
115
    {{0x74, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
116
    {{0x75, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
117
    {{0x76, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
118
    {{0x77, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
119
    {{0x79, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base64},
120
    {{0x7a, DST, 0, MEM_OFFSET, IMM}, bpf_conformance_groups_t::base64},
121
    {{0x7b, DST, SRC, MEM_OFFSET, 0}, bpf_conformance_groups_t::base64},
122
    {{0x7c, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
123
    {{0x7d, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
124
    {{0x7e, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
125
    {{0x7f, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
126
    {{0x84, DST, 0, 0, 0}, bpf_conformance_groups_t::base32},
127
    {{0x85, 0, 0, 0, HELPER_ID}, bpf_conformance_groups_t::base32},
128
    {{0x85, 0, 1, 0, JMP_OFFSET}, bpf_conformance_groups_t::base32},
129
    // TODO(issue #590): Add support for calling a helper function by BTF ID.
130
    // {{0x85, 0, 2, 0, IMM}, bpf_conformance_groups_t::base32},
131
    {{0x87, DST, 0, 0, 0}, bpf_conformance_groups_t::base64},
132
    {{0x8d, DST, 0, 0, 0}, bpf_conformance_groups_t::callx},
133
    {{0x94, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul32},
134
    {{0x94, DST, 0, 1, IMM}, bpf_conformance_groups_t::divmul32},
135
    {{0x95, 0, 0, 0, 0}, bpf_conformance_groups_t::base32},
136
    {{0x97, DST, 0, 0, IMM}, bpf_conformance_groups_t::divmul64},
137
    {{0x97, DST, 0, 1, IMM}, bpf_conformance_groups_t::divmul64},
138
    {{0x9c, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul32},
139
    {{0x9c, DST, SRC, 1, 0}, bpf_conformance_groups_t::divmul32},
140
    {{0x9f, DST, SRC, 0, 0}, bpf_conformance_groups_t::divmul64},
141
    {{0x9f, DST, SRC, 1, 0}, bpf_conformance_groups_t::divmul64},
142
    {{0xa4, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
143
    {{0xa5, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
144
    {{0xa6, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
145
    {{0xa7, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
146
    {{0xac, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
147
    {{0xad, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
148
    {{0xae, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
149
    {{0xaf, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
150
    {{0xb4, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
151
    {{0xb5, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
152
    {{0xb6, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
153
    {{0xb7, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
154
    {{0xbc, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
155
    {{0xbc, DST, SRC, 8, 0}, bpf_conformance_groups_t::base32},
156
    {{0xbc, DST, SRC, 16, 0}, bpf_conformance_groups_t::base32},
157
    {{0xbd, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
158
    {{0xbe, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
159
    {{0xbf, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
160
    {{0xbf, DST, SRC, 8, 0}, bpf_conformance_groups_t::base64},
161
    {{0xbf, DST, SRC, 16, 0}, bpf_conformance_groups_t::base64},
162
    {{0xbf, DST, SRC, 32, 0}, bpf_conformance_groups_t::base64},
163
    {{0xc3, DST, SRC, MEM_OFFSET, 0x00}, bpf_conformance_groups_t::atomic32},
164
    {{0xc3, DST, SRC, MEM_OFFSET, 0x01}, bpf_conformance_groups_t::atomic32},
165
    {{0xc3, DST, SRC, MEM_OFFSET, 0x40}, bpf_conformance_groups_t::atomic32},
166
    {{0xc3, DST, SRC, MEM_OFFSET, 0x41}, bpf_conformance_groups_t::atomic32},
167
    {{0xc3, DST, SRC, MEM_OFFSET, 0x50}, bpf_conformance_groups_t::atomic32},
168
    {{0xc3, DST, SRC, MEM_OFFSET, 0x51}, bpf_conformance_groups_t::atomic32},
169
    {{0xc3, DST, SRC, MEM_OFFSET, 0xa0}, bpf_conformance_groups_t::atomic32},
170
    {{0xc3, DST, SRC, MEM_OFFSET, 0xa1}, bpf_conformance_groups_t::atomic32},
171
    {{0xc3, DST, SRC, MEM_OFFSET, 0xe1}, bpf_conformance_groups_t::atomic32},
172
    {{0xc3, DST, SRC, MEM_OFFSET, 0xf1}, bpf_conformance_groups_t::atomic32},
173
    {{0xc4, DST, 0, 0, IMM}, bpf_conformance_groups_t::base32},
174
    {{0xc5, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
175
    {{0xc6, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
176
    {{0xc7, DST, 0, 0, IMM}, bpf_conformance_groups_t::base64},
177
    {{0xcc, DST, SRC, 0, 0}, bpf_conformance_groups_t::base32},
178
    {{0xcd, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
179
    {{0xce, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
180
    {{0xcf, DST, SRC, 0, 0}, bpf_conformance_groups_t::base64},
181
    {{0xd4, DST, 0, 0, 0x10}, bpf_conformance_groups_t::base32},
182
    {{0xd4, DST, 0, 0, 0x20}, bpf_conformance_groups_t::base32},
183
    {{0xd4, DST, 0, 0, 0x40}, bpf_conformance_groups_t::base64},
184
    {{0xd5, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base64},
185
    {{0xd6, DST, 0, JMP_OFFSET, IMM}, bpf_conformance_groups_t::base32},
186
    {{0xd7, DST, 0, 0, 0x10}, bpf_conformance_groups_t::base32},
187
    {{0xd7, DST, 0, 0, 0x20}, bpf_conformance_groups_t::base32},
188
    {{0xd7, DST, 0, 0, 0x40}, bpf_conformance_groups_t::base64},
189
    {{0xdb, DST, SRC, MEM_OFFSET, 0x00}, bpf_conformance_groups_t::atomic64},
190
    {{0xdb, DST, SRC, MEM_OFFSET, 0x01}, bpf_conformance_groups_t::atomic64},
191
    {{0xdb, DST, SRC, MEM_OFFSET, 0x40}, bpf_conformance_groups_t::atomic64},
192
    {{0xdb, DST, SRC, MEM_OFFSET, 0x41}, bpf_conformance_groups_t::atomic64},
193
    {{0xdb, DST, SRC, MEM_OFFSET, 0x50}, bpf_conformance_groups_t::atomic64},
194
    {{0xdb, DST, SRC, MEM_OFFSET, 0x51}, bpf_conformance_groups_t::atomic64},
195
    {{0xdb, DST, SRC, MEM_OFFSET, 0xa0}, bpf_conformance_groups_t::atomic64},
196
    {{0xdb, DST, SRC, MEM_OFFSET, 0xa1}, bpf_conformance_groups_t::atomic64},
197
    {{0xdb, DST, SRC, MEM_OFFSET, 0xe1}, bpf_conformance_groups_t::atomic64},
198
    {{0xdb, DST, SRC, MEM_OFFSET, 0xf1}, bpf_conformance_groups_t::atomic64},
199
    {{0xdc, DST, 0, 0, 0x10}, bpf_conformance_groups_t::base32},
200
    {{0xdc, DST, 0, 0, 0x20}, bpf_conformance_groups_t::base32},
201
    {{0xdc, DST, 0, 0, 0x40}, bpf_conformance_groups_t::base64},
202
    {{0xdd, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base64},
203
    {{0xde, DST, SRC, JMP_OFFSET, 0}, bpf_conformance_groups_t::base32},
204
};
205

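// Each entry pairs an instruction pattern with the bpf_conformance_groups_t group
// that a platform must support for the opcode to be accepted; the
// "check unmarshal conformance groups" test below exercises each entry both with
// the group disabled (unmarshal must fail) and with it enabled (unmarshal must succeed).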
206
// Verify that we can successfully unmarshal an instruction.
207
static void check_unmarshal_succeed(const EbpfInst& ins, const ebpf_platform_t& platform = g_ebpf_platform_linux) {
320✔
208
    const ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
800✔
209
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
320✔
210
    const InstructionSeq parsed =
160✔
211
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", {ins, exit, exit}, info}));
1,280✔
212
    REQUIRE(parsed.size() == 3);
320✔
213
}
320✔
214

215
// Verify that we can successfully unmarshal a 64-bit immediate instruction.
216
static void check_unmarshal_succeed(EbpfInst inst1, EbpfInst inst2,
6✔
217
                                    const ebpf_platform_t& platform = g_ebpf_platform_linux) {
218
    const ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
15✔
219
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
6✔
220
    const InstructionSeq parsed =
3✔
221
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", {inst1, inst2, exit, exit}, info}));
24✔
222
    REQUIRE(parsed.size() == 3);
6✔
223
}
6✔
224

225
// Verify that if we unmarshal an instruction and then re-marshal it,
226
// we get what we expect.
227
static void compare_unmarshal_marshal(const EbpfInst& ins, const EbpfInst& expected_result,
20✔
228
                                      const ebpf_platform_t& platform = g_ebpf_platform_linux) {
229
    ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
50✔
230
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
20✔
231
    const InstructionSeq inst_seq =
10✔
232
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", {ins, exit, exit}, info}));
80✔
233
    REQUIRE(inst_seq.size() == 3);
20✔
234
    auto [_, single, _2] = inst_seq.front();
20✔
235
    (void)_;  // unused
10✔
236
    (void)_2; // unused
10✔
237
    std::vector<EbpfInst> marshaled = marshal(single, 0);
20✔
238
    REQUIRE(marshaled.size() == 1);
20✔
239
    EbpfInst result = marshaled.back();
20✔
240
    REQUIRE(memcmp(&expected_result, &result, sizeof(result)) == 0);
20✔
241
}
20✔
242

243
// Verify that if we unmarshal two instructions and then re-marshal the result,
244
// we get what we expect.
245
static void compare_unmarshal_marshal(const EbpfInst& ins1, const EbpfInst& ins2, const EbpfInst& expected_result) {
4✔
246
    ProgramInfo info{.platform = &g_ebpf_platform_linux,
4✔
247
                     .type = g_ebpf_platform_linux.get_program_type("unspec", "unspec")};
10✔
248
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
4✔
249
    InstructionSeq parsed =
2✔
250
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", {ins1, ins2, exit, exit}, info}));
16✔
251
    REQUIRE(parsed.size() == 3);
4✔
252
    auto [_, single, _2] = parsed.front();
4✔
253
    (void)_;  // unused
2✔
254
    (void)_2; // unused
2✔
255
    std::vector<EbpfInst> marshaled = marshal(single, 0);
4✔
256
    REQUIRE(marshaled.size() == 1);
4✔
257
    EbpfInst result = marshaled.back();
4✔
258
    REQUIRE(memcmp(&expected_result, &result, sizeof(result)) == 0);
4✔
259
}
4✔
260

261
// Verify that if we unmarshal a 64-bit immediate instruction and then re-marshal it,
262
// we get what we expect.
263
static void compare_unmarshal_marshal(const EbpfInst& ins1, const EbpfInst& ins2, const EbpfInst& expected_result1,
4✔
264
                                      const EbpfInst& expected_result2) {
265
    ProgramInfo info{.platform = &g_ebpf_platform_linux,
4✔
266
                     .type = g_ebpf_platform_linux.get_program_type("unspec", "unspec")};
10✔
267
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
4✔
268
    const InstructionSeq inst_seq =
2✔
269
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", {ins1, ins2, exit, exit}, info}));
16✔
270
    REQUIRE(inst_seq.size() == 3);
4✔
271
    auto [_, single, _2] = inst_seq.front();
4✔
272
    (void)_;  // unused
2✔
273
    (void)_2; // unused
2✔
274
    std::vector<EbpfInst> marshaled = marshal(single, 0);
4✔
275
    REQUIRE(marshaled.size() == 2);
4✔
276
    EbpfInst result1 = marshaled.front();
4✔
277
    REQUIRE(memcmp(&expected_result1, &result1, sizeof(result1)) == 0);
4✔
278
    EbpfInst result2 = marshaled.back();
4✔
279
    REQUIRE(memcmp(&expected_result2, &result2, sizeof(result2)) == 0);
4✔
280
}
4✔
281

282
// Verify that if we marshal an instruction and then unmarshal it,
283
// we get the original.
284
static void compare_marshal_unmarshal(const Instruction& ins, bool double_cmd = false,
336✔
285
                                      const ebpf_platform_t& platform = g_ebpf_platform_linux) {
286
    ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
840✔
287
    const InstructionSeq inst_seq =
168✔
288
        std::get<InstructionSeq>(unmarshal(RawProgram{"", "", 0, "", marshal(ins, 0), info}));
1,176✔
289
    REQUIRE(inst_seq.size() == 1);
336✔
290
    auto [_, single, _2] = inst_seq.back();
336✔
291
    (void)_;  // unused
168✔
292
    (void)_2; // unused
168✔
293
    REQUIRE(single == ins);
336✔
294
}
336✔
295

296
static void check_marshal_unmarshal_fail(const Instruction& ins, const std::string& expected_error_message,
56✔
297
                                         const ebpf_platform_t& platform = g_ebpf_platform_linux) {
298
    const ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
140✔
299
    auto result = unmarshal(RawProgram{"", "", 0, "", marshal(ins, 0), info});
168✔
300
    auto* error_message = std::get_if<std::string>(&result);
56✔
301
    REQUIRE(error_message != nullptr);
56✔
302
    REQUIRE(*error_message == expected_error_message);
56✔
303
}
56✔
304

305
static void check_unmarshal_fail(EbpfInst inst, const std::string& expected_error_message,
1,470✔
306
                                 const ebpf_platform_t& platform = g_ebpf_platform_linux) {
307
    ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
3,675✔
308
    std::vector insns = {inst};
2,205✔
309
    auto result = unmarshal(RawProgram{"", "", 0, "", insns, info});
4,410✔
310
    auto* error_message = std::get_if<std::string>(&result);
1,470✔
311
    REQUIRE(error_message != nullptr);
1,470✔
312
    REQUIRE(*error_message == expected_error_message);
1,470✔
313
}
1,470✔
314

315
static void check_unmarshal_fail_goto(EbpfInst inst, const std::string& expected_error_message,
226✔
316
                                      const ebpf_platform_t& platform = g_ebpf_platform_linux) {
317
    ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
565✔
318
    constexpr EbpfInst exit{.opcode = INST_OP_EXIT};
226✔
319
    std::vector insns{inst, exit, exit};
339✔
320
    auto result = unmarshal(RawProgram{"", "", 0, "", insns, info});
678✔
321
    auto* error_message = std::get_if<std::string>(&result);
226✔
322
    REQUIRE(error_message != nullptr);
226✔
323
    REQUIRE(*error_message == expected_error_message);
226✔
324
}
226✔
325

326
// Check that unmarshaling a 64-bit immediate instruction fails.
327
static void check_unmarshal_fail(EbpfInst inst1, EbpfInst inst2, const std::string& expected_error_message,
32✔
328
                                 const ebpf_platform_t& platform = g_ebpf_platform_linux) {
329
    ProgramInfo info{.platform = &platform, .type = platform.get_program_type("unspec", "unspec")};
80✔
330
    std::vector insns{inst1, inst2};
48✔
331
    auto result = unmarshal(RawProgram{"", "", 0, "", insns, info});
96✔
332
    auto* error_message = std::get_if<std::string>(&result);
32✔
333
    REQUIRE(error_message != nullptr);
32✔
334
    REQUIRE(*error_message == expected_error_message);
32✔
335
}
32✔
336

337
static constexpr auto ws = {1, 2, 4, 8};
338

339
TEST_CASE("disasm_marshal", "[disasm][marshal]") {
28✔
340
    SECTION("Bin") {
28✔
341
        SECTION("Reg src") {
6✔
342
            auto ops = {Bin::Op::MOV,  Bin::Op::ADD,  Bin::Op::SUB,    Bin::Op::MUL,     Bin::Op::UDIV,   Bin::Op::UMOD,
2✔
343
                        Bin::Op::OR,   Bin::Op::AND,  Bin::Op::LSH,    Bin::Op::RSH,     Bin::Op::ARSH,   Bin::Op::XOR,
344
                        Bin::Op::SDIV, Bin::Op::SMOD, Bin::Op::MOVSX8, Bin::Op::MOVSX16, Bin::Op::MOVSX32};
2✔
345
            for (const auto op : ops) {
36✔
346
                compare_marshal_unmarshal(Bin{.op = op, .dst = Reg{1}, .v = Reg{2}, .is64 = true});
34✔
347
                compare_marshal_unmarshal(Bin{.op = op, .dst = Reg{1}, .v = Reg{2}, .is64 = false});
51✔
348
            }
349
        }
6✔
350
        SECTION("Imm src") {
6✔
351
            // MOVSX* instructions are not defined for Imm, only Reg.
352
            auto ops = {Bin::Op::MOV,  Bin::Op::ADD, Bin::Op::SUB,  Bin::Op::MUL, Bin::Op::UDIV,
4✔
353
                        Bin::Op::UMOD, Bin::Op::OR,  Bin::Op::AND,  Bin::Op::LSH, Bin::Op::RSH,
354
                        Bin::Op::ARSH, Bin::Op::XOR, Bin::Op::SDIV, Bin::Op::SMOD};
4✔
355
            for (const auto op : ops) {
60✔
356
                compare_marshal_unmarshal(Bin{.op = op, .dst = Reg{1}, .v = Imm{2}, .is64 = false});
56✔
357
                compare_marshal_unmarshal(Bin{.op = op, .dst = Reg{1}, .v = Imm{2}, .is64 = true});
84✔
358
            }
359
            SECTION("LDDW") {
4✔
360
                compare_marshal_unmarshal(
1✔
361
                    Bin{.op = Bin::Op::MOV, .dst = Reg{1}, .v = Imm{2}, .is64 = true, .lddw = true}, true);
4✔
362
            }
4✔
363
            SECTION("r10") {
4✔
364
                check_marshal_unmarshal_fail(Bin{.op = Bin::Op::ADD, .dst = Reg{10}, .v = Imm{4}, .is64 = true},
4✔
365
                                             "0: invalid target r10\n");
366
            }
4✔
367
        }
6✔
368
    }
28✔
369
    SECTION("Neg") {
28✔
370
        compare_marshal_unmarshal(Un{.op = Un::Op::NEG, .dst = Reg{1}, .is64 = false});
2✔
371
        compare_marshal_unmarshal(Un{.op = Un::Op::NEG, .dst = Reg{1}, .is64 = true});
3✔
372
    }
28✔
373
    SECTION("Endian") {
28✔
374
        // FIX: `.is64` comes from the instruction class (BPF_ALU or BPF_ALU64) but is unused since it can be derived
375
        // from `.op`.
376
        {
1✔
377
            auto ops = {
2✔
378
                Un::Op::BE16, Un::Op::BE32, Un::Op::BE64, Un::Op::LE16, Un::Op::LE32, Un::Op::LE64,
379
            };
2✔
380
            for (const auto op : ops) {
14✔
381
                compare_marshal_unmarshal(Un{.op = op, .dst = Reg{1}, .is64 = false});
18✔
382
            }
383
        }
384
        {
1✔
385
            auto ops = {
2✔
386
                Un::Op::SWAP16,
387
                Un::Op::SWAP32,
388
                Un::Op::SWAP64,
389
            };
2✔
390
            for (const auto op : ops) {
8✔
391
                compare_marshal_unmarshal(Un{.op = op, .dst = Reg{1}, .is64 = true});
9✔
392
            }
393
        }
394
    }
28✔
395

396
    SECTION("LoadMapFd") { compare_marshal_unmarshal(LoadMapFd{.dst = Reg{1}, .mapfd = 1}, true); }
29✔
397
    SECTION("LoadMapAddress") {
28✔
398
        compare_marshal_unmarshal(LoadMapAddress{.dst = Reg{1}, .mapfd = 1, .offset = 4}, true);
3✔
399
    }
28✔
400

401
    SECTION("Jmp") {
28✔
402
        auto ops = {Condition::Op::EQ, Condition::Op::GT, Condition::Op::GE, Condition::Op::SET,
6✔
403
                    // Condition::Op::NSET, does not exist in ebpf
404
                    Condition::Op::NE, Condition::Op::SGT, Condition::Op::SGE, Condition::Op::LT, Condition::Op::LE,
405
                    Condition::Op::SLT, Condition::Op::SLE};
6✔
406
        SECTION("goto offset") {
6✔
407
            EbpfInst jmp_offset{.opcode = INST_OP_JA16, .offset = 1};
2✔
408
            compare_unmarshal_marshal(jmp_offset, jmp_offset);
2✔
409

410
            // JA32 +1 is equivalent to JA16 +1 since the offset fits in 16 bits.
411
            compare_unmarshal_marshal(EbpfInst{.opcode = INST_OP_JA32, .imm = 1}, jmp_offset);
2✔
412
        }
6✔
413
        SECTION("Reg right") {
6✔
414
            for (const auto op : ops) {
24✔
415
                Condition cond{.op = op, .left = Reg{1}, .right = Reg{2}, .is64 = true};
22✔
416
                compare_marshal_unmarshal(Jmp{.cond = cond, .target = Label(0)});
44✔
417

418
                // The following should fail unmarshaling since it jumps past the end of the instruction sequence.
419
                check_marshal_unmarshal_fail(Jmp{.cond = cond, .target = Label(1)}, "0: jump out of bounds\n");
55✔
420
            }
421
        }
6✔
422
        SECTION("Imm right") {
6✔
423
            for (const auto op : ops) {
24✔
424
                Condition cond{.op = op, .left = Reg{1}, .right = Imm{2}, .is64 = true};
22✔
425
                compare_marshal_unmarshal(Jmp{.cond = cond, .target = Label(0)});
44✔
426

427
                // The following should fail unmarshaling since it jumps past the end of the instruction sequence.
428
                check_marshal_unmarshal_fail(Jmp{.cond = cond, .target = Label(1)}, "0: jump out of bounds\n");
55✔
429
            }
430
        }
6✔
431
    }
28✔
432

433
    SECTION("Call") {
28✔
434
        for (int func : {1, 17}) {
6✔
435
            compare_marshal_unmarshal(Call{func});
6✔
436
        }
437

438
        // Test callx without support.
439
        std::ostringstream oss;
2✔
440
        oss << "0: bad instruction op 0x" << std::hex << INST_OP_CALLX << std::endl;
2✔
441
        check_unmarshal_fail(EbpfInst{.opcode = INST_OP_CALLX}, oss.str());
2✔
442

443
        // Test callx with support.  Note that callx puts the register number in 'dst' not 'src'.
444
        ebpf_platform_t platform = g_ebpf_platform_linux;
2✔
445
        platform.supported_conformance_groups |= bpf_conformance_groups_t::callx;
2✔
446
        compare_marshal_unmarshal(Callx{8}, false, platform);
2✔
447
        EbpfInst callx{.opcode = INST_OP_CALLX, .dst = 8};
2✔
448
        compare_unmarshal_marshal(callx, callx, platform);
2✔
449
        check_unmarshal_fail({.opcode = INST_OP_CALLX, .dst = 11}, "0: bad register\n", platform);
3✔
450
        check_unmarshal_fail({.opcode = INST_OP_CALLX, .dst = 8, .imm = 8}, "0: nonzero imm for op 0x8d\n", platform);
2✔
451

452
        // clang prior to v19 put the register into 'imm' instead of 'dst', so we treat it as equivalent.
453
        compare_unmarshal_marshal(EbpfInst{.opcode = /* 0x8d */ INST_OP_CALLX, .imm = 8}, callx, platform);
2✔
454
        check_unmarshal_fail({.opcode = INST_OP_CALLX, .imm = 11}, "0: bad register\n", platform);
3✔
455
        check_unmarshal_fail({.opcode = INST_OP_CALLX, .imm = -1}, "0: bad register\n", platform);
2✔
456
    }
30✔
457

458
    SECTION("Exit") { compare_marshal_unmarshal(Exit{}); }
29✔
459

460
    SECTION("Packet") {
28✔
461
        for (int w : ws) {
10✔
462
            if (w != 8) {
8✔
463
                compare_marshal_unmarshal(Packet{.width = w, .offset = 7, .regoffset = {}});
6✔
464
                compare_marshal_unmarshal(Packet{.width = w, .offset = 7, .regoffset = Reg{2}});
9✔
465
            }
466
        }
467
    }
28✔
468

469
    SECTION("Atomic") {
28✔
470
        for (int w : ws) {
10✔
471
            if (w == 4 || w == 8) {
8✔
472
                Deref access{.width = w, .basereg = Reg{2}, .offset = 17};
4✔
473
                compare_marshal_unmarshal(
2✔
474
                    Atomic{.op = Atomic::Op::ADD, .fetch = false, .access = access, .valreg = Reg{1}});
4✔
475
                compare_marshal_unmarshal(
2✔
476
                    Atomic{.op = Atomic::Op::ADD, .fetch = true, .access = access, .valreg = Reg{1}});
4✔
477
                compare_marshal_unmarshal(
2✔
478
                    Atomic{.op = Atomic::Op::OR, .fetch = false, .access = access, .valreg = Reg{1}});
4✔
479
                compare_marshal_unmarshal(
2✔
480
                    Atomic{.op = Atomic::Op::OR, .fetch = true, .access = access, .valreg = Reg{1}});
4✔
481
                compare_marshal_unmarshal(
2✔
482
                    Atomic{.op = Atomic::Op::AND, .fetch = false, .access = access, .valreg = Reg{1}});
4✔
483
                compare_marshal_unmarshal(
2✔
484
                    Atomic{.op = Atomic::Op::AND, .fetch = true, .access = access, .valreg = Reg{1}});
4✔
485
                compare_marshal_unmarshal(
2✔
486
                    Atomic{.op = Atomic::Op::XOR, .fetch = false, .access = access, .valreg = Reg{1}});
4✔
487
                compare_marshal_unmarshal(
2✔
488
                    Atomic{.op = Atomic::Op::XOR, .fetch = true, .access = access, .valreg = Reg{1}});
6✔
489
                check_marshal_unmarshal_fail(
8✔
490
                    Atomic{.op = Atomic::Op::XCHG, .fetch = false, .access = access, .valreg = Reg{1}},
6✔
491
                    "0: unsupported immediate\n");
492
                compare_marshal_unmarshal(
2✔
493
                    Atomic{.op = Atomic::Op::XCHG, .fetch = true, .access = access, .valreg = Reg{1}});
6✔
494
                check_marshal_unmarshal_fail(
8✔
495
                    Atomic{.op = Atomic::Op::CMPXCHG, .fetch = false, .access = access, .valreg = Reg{1}},
6✔
496
                    "0: unsupported immediate\n");
497
                compare_marshal_unmarshal(
4✔
498
                    Atomic{.op = Atomic::Op::CMPXCHG, .fetch = true, .access = access, .valreg = Reg{1}});
6✔
499
            }
500
        }
501
    }
28✔
502
}
28✔
503

504
TEST_CASE("marshal", "[disasm][marshal]") {
8✔
505
    SECTION("Load") {
8✔
506
        Deref access{.width = 1, .basereg = Reg{4}, .offset = 6};
2✔
507
        Mem m{.access = access, .value = Reg{3}, .is_load = true};
2✔
508
        auto ins = marshal(m, 0).at(0);
3✔
509
        EbpfInst expect{
2✔
510
            .opcode = gsl::narrow<uint8_t>(INST_CLS_LD | INST_MODE_MEM | width_to_opcode(1) | 0x1),
2✔
511
            .dst = 3,
512
            .src = 4,
513
            .offset = 6,
514
            .imm = 0,
515
        };
2✔
516
        REQUIRE(ins.dst == expect.dst);
2✔
517
        REQUIRE(ins.src == expect.src);
2✔
518
        REQUIRE(ins.offset == expect.offset);
2✔
519
        REQUIRE(ins.imm == expect.imm);
2✔
520
        REQUIRE(ins.opcode == expect.opcode);
2✔
521
    }
8✔
522
    SECTION("Load Imm") {
8✔
523
        Deref access{.width = 1, .basereg = Reg{4}, .offset = 6};
2✔
524
        REQUIRE_THROWS(marshal(Mem{.access = access, .value = Imm{3}, .is_load = true}, 0));
4✔
525
    }
8✔
526
    SECTION("Store") {
8✔
527
        Deref access{.width = 1, .basereg = Reg{4}, .offset = 6};
2✔
528
        auto ins = marshal(Mem{.access = access, .value = Reg{3}, .is_load = false}, 0).at(0);
3✔
529
        REQUIRE(ins.src == 3);
2✔
530
        REQUIRE(ins.dst == 4);
2✔
531
        REQUIRE(ins.offset == 6);
2✔
532
        REQUIRE(ins.imm == 0);
2✔
533
        REQUIRE(ins.opcode == (uint8_t)(INST_CLS_ST | INST_MODE_MEM | width_to_opcode(1) | 0x1));
2✔
534
    }
8✔
535
    SECTION("StoreImm") {
8✔
536
        Deref access{.width = 1, .basereg = Reg{4}, .offset = 6};
2✔
537
        auto ins = marshal(Mem{.access = access, .value = Imm{3}, .is_load = false}, 0).at(0);
3✔
538
        REQUIRE(ins.src == 0);
2✔
539
        REQUIRE(ins.dst == 4);
2✔
540
        REQUIRE(ins.offset == 6);
2✔
541
        REQUIRE(ins.imm == 3);
2✔
542
        REQUIRE(ins.opcode == (uint8_t)(INST_CLS_ST | INST_MODE_MEM | width_to_opcode(1) | 0x0));
2✔
543
    }
8✔
544
}
8✔
545

546
TEST_CASE("disasm_marshal_Mem", "[disasm][marshal]") {
8✔
547
    SECTION("Load") {
8✔
548
        for (const int w : ws) {
10✔
549
            Deref access;
8✔
550
            access.basereg = Reg{4};
8✔
551
            access.offset = 6;
8✔
552
            access.width = w;
8✔
553
            compare_marshal_unmarshal(Mem{.access = access, .value = Reg{3}, .is_load = true});
12✔
554
        }
555
    }
8✔
556
    SECTION("Load R10") {
8✔
557
        Deref access;
2✔
558
        access.basereg = Reg{0};
2✔
559
        access.offset = 0;
2✔
560
        access.width = 8;
2✔
561
        check_marshal_unmarshal_fail(Mem{.access = access, .value = Reg{10}, .is_load = true},
4✔
562
                                     "0: cannot modify r10\n");
563
    }
8✔
564
    SECTION("Store Register") {
8✔
565
        for (const int w : ws) {
10✔
566
            Deref access;
8✔
567
            access.basereg = Reg{9};
8✔
568
            access.offset = 8;
8✔
569
            access.width = w;
8✔
570
            compare_marshal_unmarshal(Mem{.access = access, .value = Reg{4}, .is_load = false});
12✔
571
        }
572
    }
8✔
573
    SECTION("Store Immediate") {
8✔
574
        for (const int w : ws) {
10✔
575
            Deref access;
8✔
576
            access.basereg = Reg{10};
8✔
577
            access.offset = 2;
8✔
578
            access.width = w;
8✔
579
            compare_marshal_unmarshal(Mem{.access = access, .value = Imm{5}, .is_load = false});
12✔
580
        }
581
    }
8✔
582
}
8✔
583

584
TEST_CASE("unmarshal extension opcodes", "[disasm][marshal]") {
2✔
585
    // Merge (rX <<= 32; rX >>>= 32) into wX = rX.
586
    compare_unmarshal_marshal(EbpfInst{.opcode = INST_ALU_OP_LSH | INST_SRC_IMM | INST_CLS_ALU64, .dst = 1, .imm = 32},
2✔
587
                              EbpfInst{.opcode = INST_ALU_OP_RSH | INST_SRC_IMM | INST_CLS_ALU64, .dst = 1, .imm = 32},
2✔
588
                              EbpfInst{.opcode = INST_ALU_OP_MOV | INST_SRC_REG | INST_CLS_ALU, .dst = 1, .src = 1});
2✔
589

590
    // Merge (rX <<= 32; rX >>= 32) into rX s32= rX.
591
    compare_unmarshal_marshal(
2✔
592
        EbpfInst{.opcode = INST_ALU_OP_LSH | INST_SRC_IMM | INST_CLS_ALU64, .dst = 1, .imm = 32},
2✔
593
        EbpfInst{.opcode = INST_ALU_OP_ARSH | INST_SRC_IMM | INST_CLS_ALU64, .dst = 1, .imm = 32},
2✔
594
        EbpfInst{.opcode = INST_ALU_OP_MOV | INST_SRC_REG | INST_CLS_ALU64, .dst = 1, .src = 1, .offset = 32});
2✔
595
}
2✔
596

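// Why the merges above are valid: shifting a 64-bit register left by 32 and then
// logically right by 32 zero-extends its lower 32 bits, which is exactly what the
// 32-bit register move (wX = rX) does; shifting left by 32 and then arithmetically
// right by 32 sign-extends the lower 32 bits, which the ISA expresses directly as
// the sign-extending move encoded with offset 32 (rX s32= rX).
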
597
// Check that unmarshaling an invalid instruction fails with a given message.
598
static void check_unmarshal_instruction_fail(EbpfInst& inst, const std::string& message,
1,080✔
599
                                             const ebpf_platform_t& platform = g_ebpf_platform_linux) {
600
    if (inst.offset == JMP_OFFSET) {
1,080✔
601
        inst.offset = 1;
226✔
602
        check_unmarshal_fail_goto(inst, message);
226✔
603
    } else if (inst.opcode == INST_OP_LDDW_IMM) {
854✔
604
        check_unmarshal_fail(inst, EbpfInst{}, message, platform);
14✔
605
    } else {
606
        check_unmarshal_fail(inst, message, platform);
840✔
607
    }
608
}
1,080✔
609

610
static ebpf_platform_t get_template_platform(const EbpfInstructionTemplate& previous_template) {
1,304✔
611
    ebpf_platform_t platform = g_ebpf_platform_linux;
1,304✔
612
    platform.supported_conformance_groups |= previous_template.groups;
1,304✔
613
    return platform;
1,304✔
614
}
615

616
// Check whether an instruction matches an instruction template that may have wildcards.
617
static bool matches_template_inst(const EbpfInst inst, const EbpfInst template_inst) {
540✔
618
    if (inst.opcode != template_inst.opcode) {
540✔
619
        return false;
201✔
620
    }
621
    if (inst.dst != template_inst.dst && template_inst.dst != DST) {
138✔
622
        return false;
1✔
623
    }
624
    if (inst.src != template_inst.src && template_inst.src != SRC) {
136✔
625
        return false;
13✔
626
    }
627
    if (inst.offset != template_inst.offset && template_inst.offset != MEM_OFFSET &&
110✔
628
        template_inst.offset != JMP_OFFSET) {
20✔
629
        return false;
20✔
630
    }
631
    if (inst.imm != template_inst.imm && template_inst.imm != IMM && template_inst.imm != JMP_OFFSET) {
70✔
632
        return false;
32✔
633
    }
634
    return true;
19✔
635
}
636

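// Illustrative sketch (not used by the tests below): a helper along these lines
// could check whether a concrete instruction is covered by any template entry,
// e.g. matches_any_template({.opcode = 0x05, .offset = 1}) would match the
// unconditional-jump template.
[[maybe_unused]] static bool matches_any_template(const EbpfInst inst) {
    for (const auto& entry : instruction_template) {
        if (matches_template_inst(inst, entry.inst)) {
            return true;
        }
    }
    return false;
}
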
637
// Check that various 'dst' variations between two valid instruction templates fail.
638
static void check_instruction_dst_variations(const EbpfInstructionTemplate& previous_template,
326✔
639
                                             const std::optional<const EbpfInstructionTemplate> next_template) {
640
    EbpfInst inst = previous_template.inst;
326✔
641
    const ebpf_platform_t platform = get_template_platform(previous_template);
326✔
642
    if (inst.dst == DST) {
326✔
643
        inst.dst = INVALID_REGISTER;
304✔
644
        check_unmarshal_instruction_fail(inst, "0: bad register\n", platform);
608✔
645
    } else {
646
        // This instruction doesn't put a register number in the 'dst' field.
647
        // Just try the next value unless that's what the next template has.
648
        inst.dst++;
22✔
649
        if (!next_template || !matches_template_inst(inst, next_template->inst)) {
22✔
650
            std::ostringstream oss;
22✔
651
            if (inst.dst == 1) {
22✔
652
                oss << "0: nonzero dst for register op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
22✔
653
            } else {
UNCOV
654
                oss << "0: bad instruction op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
×
655
            }
656
            check_unmarshal_instruction_fail(inst, oss.str(), platform);
22✔
657
        }
22✔
658
    }
659
}
326✔
660

661
// Check that various 'src' variations between two valid instruction templates fail.
662
static void check_instruction_src_variations(const EbpfInstructionTemplate& previous_template,
326✔
663
                                             const std::optional<const EbpfInstructionTemplate> next_template) {
664
    EbpfInst inst = previous_template.inst;
326✔
665
    const ebpf_platform_t platform = get_template_platform(previous_template);
326✔
666
    if (inst.src == SRC) {
326✔
667
        inst.src = INVALID_REGISTER;
172✔
668
        check_unmarshal_instruction_fail(inst, "0: bad register\n", platform);
344✔
669
    } else {
670
        // This instruction doesn't put a register number in the 'src' field.
671
        // Just try the next value unless that's what the next template has.
672
        inst.src++;
154✔
673
        if (!next_template || !matches_template_inst(inst, next_template->inst)) {
154✔
674
            std::ostringstream oss;
148✔
675
            oss << "0: bad instruction op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
148✔
676
            check_unmarshal_instruction_fail(inst, oss.str(), platform);
148✔
677
        }
148✔
678
    }
679
}
326✔
680

681
// Check that various 'offset' variations between two valid instruction templates fail.
682
static void check_instruction_offset_variations(const EbpfInstructionTemplate& previous_template,
326✔
683
                                                const std::optional<const EbpfInstructionTemplate> next_template) {
684
    EbpfInst inst = previous_template.inst;
326✔
685
    const ebpf_platform_t platform = get_template_platform(previous_template);
326✔
686
    if (inst.offset == JMP_OFFSET) {
326✔
687
        inst.offset = 0; // Not a valid jump offset.
90✔
688
        check_unmarshal_instruction_fail(inst, "0: jump out of bounds\n", platform);
180✔
689
    } else if (inst.offset != MEM_OFFSET) {
236✔
690
        // This instruction limits what can appear in the 'offset' field.
691
        // Just try the next value unless that's what the next template has.
692
        inst.offset++;
172✔
693
        if (!next_template || !matches_template_inst(inst, next_template->inst)) {
172✔
694
            std::ostringstream oss;
156✔
695
            if (inst.offset == 1 &&
221✔
696
                (!next_template || next_template->inst.opcode != inst.opcode || next_template->inst.offset == 0)) {
130✔
697
                oss << "0: nonzero offset for op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
126✔
698
            } else {
699
                oss << "0: invalid offset for op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
30✔
700
            }
701
            check_unmarshal_instruction_fail(inst, oss.str(), platform);
156✔
702
        }
156✔
703
    }
704
}
326✔
705

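// Worked example of the "unless that's what the next template has" checks above:
// the table contains both {{0x34, DST, 0, 0, IMM}, divmul32} and
// {{0x34, DST, 0, 1, IMM}, divmul32} (udiv32 vs. sdiv32), so bumping the offset of
// the first entry from 0 to 1 lands on the next valid template and is skipped
// rather than being reported as an unmarshal failure.
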
706
// Check that various 'imm' variations between two valid instruction templates fail.
707
static void check_instruction_imm_variations(const EbpfInstructionTemplate& previous_template,
326✔
708
                                             const std::optional<const EbpfInstructionTemplate> next_template) {
709
    EbpfInst inst = previous_template.inst;
326✔
710
    const ebpf_platform_t platform = get_template_platform(previous_template);
326✔
711
    if (inst.imm == JMP_OFFSET) {
326✔
712
        inst.imm = 0; // Not a valid jump offset.
4✔
713
        check_unmarshal_instruction_fail(inst, "0: jump out of bounds\n", platform);
8✔
714
    } else if (inst.imm != IMM && inst.imm != HELPER_ID) {
322✔
715
        // This instruction limits what can appear in the 'imm' field.
716
        // Just try the next value unless that's what the next template has.
717
        inst.imm++;
194✔
718
        if (!next_template || !matches_template_inst(inst, next_template->inst)) {
194✔
719
            std::ostringstream oss;
178✔
720
            if (inst.imm == 1) {
178✔
721
                oss << "0: nonzero imm for op 0x" << std::hex << static_cast<int>(inst.opcode) << std::endl;
136✔
722
            } else {
723
                oss << "0: unsupported immediate" << std::endl;
42✔
724
            }
725
            check_unmarshal_instruction_fail(inst, oss.str(), platform);
178✔
726
        }
178✔
727
    }
728

729
    // Some instructions only permit non-zero imm values.
730
    // If the next template is for one of those, check the zero value now.
731
    if (next_template && (previous_template.inst.opcode != next_template->inst.opcode) &&
488✔
732
        (next_template->inst.imm > 0) && (next_template->inst.imm != HELPER_ID) &&
614✔
733
        (next_template->inst.imm != JMP_OFFSET)) {
8✔
734
        inst = next_template->inst;
6✔
735
        inst.imm = 0;
6✔
736
        check_unmarshal_instruction_fail(inst, "0: unsupported immediate\n");
12✔
737
    }
738
}
326✔
739

740
// Check that various variations between two valid instruction templates fail.
741
static void check_instruction_variations(const std::optional<const EbpfInstructionTemplate> previous_template,
328✔
742
                                         const std::optional<const EbpfInstructionTemplate> next_template) {
743
    if (previous_template) {
328✔
744
        check_instruction_dst_variations(*previous_template, next_template);
326✔
745
        check_instruction_src_variations(*previous_template, next_template);
326✔
746
        check_instruction_offset_variations(*previous_template, next_template);
326✔
747
        check_instruction_imm_variations(*previous_template, next_template);
326✔
748
    }
749

750
    // Check any invalid opcodes in between the previous and next templates.
751
    const int previous_opcode = previous_template ? previous_template->inst.opcode : -1;
328✔
752
    const int next_opcode = next_template ? next_template->inst.opcode : 0x100;
328✔
753
    for (int opcode = previous_opcode + 1; opcode < next_opcode; opcode++) {
594✔
754
        const EbpfInst inst{.opcode = static_cast<uint8_t>(opcode)};
266✔
755
        std::ostringstream oss;
266✔
756
        oss << "0: bad instruction op 0x" << std::hex << opcode << std::endl;
266✔
757
        check_unmarshal_fail(inst, oss.str());
266✔
758
    }
266✔
759
}
328✔
760

761
TEST_CASE("fail unmarshal bad instructions", "[disasm][marshal]") {
2✔
762
    constexpr size_t template_count = std::size(instruction_template);
2✔
763

764
    // Check any variations before the first template.
765
    check_instruction_variations({}, instruction_template[0]);
2✔
766

767
    for (size_t index = 1; index < template_count; index++) {
326✔
768
        check_instruction_variations(instruction_template[index - 1], instruction_template[index]);
324✔
769
    }
770

771
    // Check any remaining variations after the last template.
772
    check_instruction_variations(instruction_template[template_count - 1], {});
2✔
773
}
2✔
774

775
TEST_CASE("check unmarshal conformance groups", "[disasm][marshal]") {
2✔
776
    for (const auto& current : instruction_template) {
328✔
777
        // Try unmarshaling without support.
778
        ebpf_platform_t platform = g_ebpf_platform_linux;
326✔
779
        platform.supported_conformance_groups &= ~current.groups;
326✔
780
        std::ostringstream oss;
326✔
781
        oss << "0: bad instruction op 0x" << std::hex << static_cast<int>(current.inst.opcode) << std::endl;
326✔
782
        check_unmarshal_fail(current.inst, oss.str(), platform);
326✔
783

784
        // Try unmarshaling with support.
785
        platform.supported_conformance_groups |= current.groups;
326✔
786
        EbpfInst inst = current.inst;
326✔
787
        if (inst.offset == JMP_OFFSET) {
326✔
788
            inst.offset = 1;
90✔
789
        }
790
        if (inst.imm == JMP_OFFSET) {
326✔
791
            inst.imm = 1;
4✔
792
        }
793
        if (inst.opcode == INST_OP_LDDW_IMM) {
326✔
794
            check_unmarshal_succeed(inst, EbpfInst{}, platform);
6✔
795
        } else {
796
            check_unmarshal_succeed(inst, platform);
320✔
797
        }
798
    }
326✔
799
}
2✔
800

801
TEST_CASE("check unmarshal legacy opcodes", "[disasm][marshal]") {
2✔
802
    // The following opcodes are deprecated and should no longer be used.
803
    static uint8_t supported_legacy_opcodes[] = {0x20, 0x28, 0x30, 0x40, 0x48, 0x50};
1✔
804
    for (const uint8_t opcode : supported_legacy_opcodes) {
14✔
805
        compare_unmarshal_marshal(EbpfInst{.opcode = opcode}, EbpfInst{.opcode = opcode});
12✔
806
    }
807

808
    // Disable legacy packet instruction support.
809
    ebpf_platform_t platform = g_ebpf_platform_linux;
2✔
810
    platform.supported_conformance_groups &= ~bpf_conformance_groups_t::packet;
2✔
811
    for (const uint8_t opcode : supported_legacy_opcodes) {
14✔
812
        std::ostringstream oss;
12✔
813
        oss << "0: bad instruction op 0x" << std::hex << static_cast<int>(opcode) << std::endl;
12✔
814
        check_unmarshal_fail(EbpfInst{.opcode = opcode}, oss.str(), platform);
12✔
815
    }
12✔
816
}
2✔
817

818
TEST_CASE("unmarshal 64bit immediate", "[disasm][marshal]") {
2✔
819
    compare_unmarshal_marshal(EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = 0, .imm = 1}, EbpfInst{.imm = 2},
2✔
820
                              EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = 0, .imm = 1}, EbpfInst{.imm = 2});
2✔
821
    compare_unmarshal_marshal(EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = 0, .imm = 1}, EbpfInst{},
2✔
822
                              EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = 0, .imm = 1}, EbpfInst{});
2✔
823

824
    for (uint8_t src = 0; src <= 7; src++) {
18✔
825
        check_unmarshal_fail(EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = src}, "0: incomplete lddw\n");
24✔
826
        check_unmarshal_fail(EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = src},
32✔
827
                             EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM}, "0: invalid lddw\n");
828
    }
829

830
    // When src = {1, 3, 4, 5}, next_imm must be 0.
831
    // TODO(issue #533): add support for LDDW with src_reg > 1.
832
    check_unmarshal_fail(EbpfInst{.opcode = /* 0x18 */ INST_OP_LDDW_IMM, .src = 1}, EbpfInst{.imm = 1},
2✔
833
                         "0: lddw uses reserved fields\n");
834
}
2✔
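
// Background for the lddw checks above: an lddw (opcode 0x18) occupies two
// consecutive instruction slots, with the second slot's imm field holding the upper
// 32 bits of the 64-bit immediate and its remaining fields reserved. Hence a lone
// 0x18 is reported as "incomplete lddw", a second slot that is itself another 0x18
// is "invalid lddw", and for the reserved src values the second slot's imm must be zero.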