qmonnet / rbpf / 14420063260

12 Apr 2025 01:27PM UTC, coverage: 95.277% (+0.02%) from 95.255%

Pull Request #122: feat: Add bpf to bpf call support
Merge 0c892bdeb into 9642917c9 (github / web-flow)

192 of 204 new or added lines in 7 files covered (94.12%).
3 existing lines in 3 files now uncovered.
4317 of 4531 relevant lines covered (95.28%).
256.63 hits per line.
Source File: /src/interpreter.rs (96.63% covered)
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
//      (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)

use crate::ebpf;
use crate::ebpf::MAX_CALL_DEPTH;
use crate::lib::*;
use crate::stack::{StackFrame, StackUsage};

#[allow(clippy::too_many_arguments)]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
    allowed_memory: &HashSet<u64>
) -> Result<(), Error> {
    if let Some(addr_end) = addr.checked_add(len as u64) {
      if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
          return Ok(());
      }
      if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
          return Ok(());
      }
      if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
          return Ok(());
      }
      if allowed_memory.contains(&addr) {
          return Ok(());
      }
    }

    Err(Error::new(ErrorKind::Other, format!(
        "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        access_type, insn_ptr, addr, len,
        mbuff.as_ptr() as u64, mbuff.len(),
        mem.as_ptr() as u64, mem.len(),
        stack.as_ptr() as u64, stack.len()
    )))
}

pub fn execute_program(
    prog_: Option<&[u8]>,
    stack_usage: Option<&StackUsage>,
    mem: &[u8],
    mbuff: &[u8],
    helpers: &HashMap<u32, ebpf::Helper>,
    allowed_memory: &HashSet<u64>,
) -> Result<u64, Error> {
    const U32MAX: u64 = u32::MAX as u64;
    const SHIFT_MASK_64: u64 = 0x3f;

    let (prog, stack_usage) = match prog_ {
        Some(prog) => (prog, stack_usage.unwrap()),
        None => Err(Error::new(ErrorKind::Other,
                    "Error: No program set, call prog_set() to load one"))?,
    };
    let stack = vec![0u8;ebpf::STACK_SIZE];
    let mut stacks = [StackFrame::new();MAX_CALL_DEPTH];
    let mut stack_frame_idx = 0;

    // R1 points to beginning of memory area, R10 to stack
    let mut reg: [u64;11] = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
    ];
    if !mbuff.is_empty() {
        reg[1] = mbuff.as_ptr() as u64;
    }
    else if !mem.is_empty() {
        reg[1] = mem.as_ptr() as u64;
    }

    let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
        check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack, allowed_memory)
    };
    let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
        check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack, allowed_memory)
    };

    // Loop on instructions
    let mut insn_ptr:usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        if stack_frame_idx < MAX_CALL_DEPTH {
            if let Some(usage) = stack_usage.stack_usage_for_local_func(insn_ptr) {
                stacks[stack_frame_idx].set_stack_usage(usage);
            }
        }
        insn_ptr += 1;
        let _dst = insn.dst as usize;
        let _src = insn.src as usize;

        let mut do_jump = || {
            insn_ptr = (insn_ptr as i16 + insn.off) as usize;
        };

        macro_rules! unsigned_u64 {
            ($imm:expr) => {
                ($imm as u32) as u64
            };
        }

        match insn.opc {

            // BPF_LD class
            // LD_ABS_* and LD_IND_* are supposed to load pointer to data from metadata buffer.
            // Since this pointer is constant, and since we already know it (mem), do not
            // bother re-fetching it, just use mem already.
            ebpf::LD_ABS_B   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_H   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_W   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_ABS_DW  => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },
            ebpf::LD_IND_B   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_H   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_W   => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_IND_DW  => reg[0] = unsafe {
                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },

            ebpf::LD_DW_IMM  => {
                let next_insn = ebpf::get_insn(prog, insn_ptr);
                insn_ptr += 1;
                reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
            },

            // BPF_LDX class
            ebpf::LD_B_REG   => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize);
                check_mem_load(x as u64, 1, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_H_REG   => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u16;
                check_mem_load(x as u64, 2, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_W_REG   => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u32;
                check_mem_load(x as u64, 4, insn_ptr)?;
                x.read_unaligned() as u64
            },
            ebpf::LD_DW_REG  => reg[_dst] = unsafe {
                let x = (reg[_src] as *const u8).wrapping_offset(insn.off as isize) as *const u64;
                check_mem_load(x as u64, 8, insn_ptr)?;
                x.read_unaligned()
            },

            // BPF_ST class
            ebpf::ST_B_IMM   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u8;
                check_mem_store(x as u64, 1, insn_ptr)?;
                x.write_unaligned(insn.imm as u8);
            },
            ebpf::ST_H_IMM   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u16;
                check_mem_store(x as u64, 2, insn_ptr)?;
                x.write_unaligned(insn.imm as u16);
            },
            ebpf::ST_W_IMM   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u32;
                check_mem_store(x as u64, 4, insn_ptr)?;
                x.write_unaligned(insn.imm as u32);
            },
            ebpf::ST_DW_IMM  => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u64;
                check_mem_store(x as u64, 8, insn_ptr)?;
                x.write_unaligned(insn.imm as u64);
            },

            // BPF_STX class
            ebpf::ST_B_REG   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u8;
                check_mem_store(x as u64, 1, insn_ptr)?;
                x.write_unaligned(reg[_src] as u8);
            },
            ebpf::ST_H_REG   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u16;
                check_mem_store(x as u64, 2, insn_ptr)?;
                x.write_unaligned(reg[_src] as u16);
            },
            ebpf::ST_W_REG   => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u32;
                check_mem_store(x as u64, 4, insn_ptr)?;
                x.write_unaligned(reg[_src] as u32);
            },
            ebpf::ST_DW_REG  => unsafe {
                let x = (reg[_dst] as *const u8).wrapping_offset(insn.off as isize) as *mut u64;
                check_mem_store(x as u64, 8, insn_ptr)?;
                x.write_unaligned(reg[_src]);
            },
            ebpf::ST_W_XADD  => unimplemented!(),
            ebpf::ST_DW_XADD => unimplemented!(),

            // BPF_ALU class
            // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
            // before we do the operation?
            // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32
            ebpf::ADD32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm)         as u64, //((reg[_dst] & U32MAX) + insn.imm  as u64)     & U32MAX,
            ebpf::ADD32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
            ebpf::SUB32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm)         as u64,
            ebpf::SUB32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
            ebpf::MUL32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm)         as u64,
            ebpf::MUL32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
            ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_IMM  => reg[_dst] = (reg[_dst] as u32 / insn.imm              as u32) as u64,
            ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_REG  => reg[_dst] = (reg[_dst] as u32 / reg[_src]             as u32) as u64,
            ebpf::OR32_IMM   =>   reg[_dst] = (reg[_dst] as u32             | insn.imm  as u32) as u64,
            ebpf::OR32_REG   =>   reg[_dst] = (reg[_dst] as u32             | reg[_src] as u32) as u64,
            ebpf::AND32_IMM  =>   reg[_dst] = (reg[_dst] as u32             & insn.imm  as u32) as u64,
            ebpf::AND32_REG  =>   reg[_dst] = (reg[_dst] as u32             & reg[_src] as u32) as u64,
            // As for the 64-bit version, we should mask the number of bits to shift with
            // 0x1f, but .wrapping_shr() already takes care of it for us.
            ebpf::LSH32_IMM  =>   reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm  as u32) as u64,
            ebpf::LSH32_REG  =>   reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
            ebpf::RSH32_IMM  =>   reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm  as u32) as u64,
            ebpf::RSH32_REG  =>   reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
            ebpf::NEG32      => { reg[_dst] = (reg[_dst] as i32).wrapping_neg()                 as u64; reg[_dst] &= U32MAX; },
            ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
            ebpf::MOD32_IMM  =>   reg[_dst] = (reg[_dst] as u32             % insn.imm  as u32) as u64,
            ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
            ebpf::MOD32_REG  =>   reg[_dst] = (reg[_dst] as u32 % reg[_src]             as u32) as u64,
            ebpf::XOR32_IMM  =>   reg[_dst] = (reg[_dst] as u32             ^ insn.imm  as u32) as u64,
            ebpf::XOR32_REG  =>   reg[_dst] = (reg[_dst] as u32             ^ reg[_src] as u32) as u64,
            ebpf::MOV32_IMM  =>   reg[_dst] = insn.imm   as u32                                 as u64,
            ebpf::MOV32_REG  =>   reg[_dst] = (reg[_src] as u32)                                as u64,
            // As for the 64-bit version, we should mask the number of bits to shift with
            // 0x1f, but .wrapping_shr() already takes care of it for us.
            ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm  as u32) as u64; reg[_dst] &= U32MAX; },
            ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
            ebpf::LE         => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_le() as u64,
                    32 => (reg[_dst] as u32).to_le() as u64,
                    64 =>  reg[_dst].to_le(),
                    _  => unreachable!(),
                };
            },
            ebpf::BE         => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_be() as u64,
                    32 => (reg[_dst] as u32).to_be() as u64,
                    64 =>  reg[_dst].to_be(),
                    _  => unreachable!(),
                };
            },

            // BPF_ALU64 class
            ebpf::ADD64_IMM  => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG  => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
            ebpf::SUB64_IMM  => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
            ebpf::SUB64_REG  => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
            ebpf::MUL64_IMM  => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG  => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
            ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
            ebpf::DIV64_IMM  => reg[_dst]                       /= insn.imm as u64,
            ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
            ebpf::DIV64_REG  => reg[_dst] /= reg[_src],
            ebpf::OR64_IMM   => reg[_dst] |=  insn.imm as u64,
            ebpf::OR64_REG   => reg[_dst] |=  reg[_src],
            ebpf::AND64_IMM  => reg[_dst] &=  insn.imm as u64,
            ebpf::AND64_REG  => reg[_dst] &=  reg[_src],
            ebpf::LSH64_IMM  => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::LSH64_REG  => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
            ebpf::RSH64_IMM  => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::RSH64_REG  => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
            ebpf::NEG64      => reg[_dst] = -(reg[_dst] as i64) as u64,
            ebpf::MOD64_IMM if insn.imm == 0 => (),
            ebpf::MOD64_IMM  => reg[_dst] %=  insn.imm as u64,
            ebpf::MOD64_REG if reg[_src] == 0 => (),
            ebpf::MOD64_REG  => reg[_dst] %= reg[_src],
            ebpf::XOR64_IMM  => reg[_dst] ^= insn.imm  as u64,
            ebpf::XOR64_REG  => reg[_dst] ^= reg[_src],
            ebpf::MOV64_IMM  => reg[_dst] =  insn.imm  as u64,
            ebpf::MOV64_REG  => reg[_dst] =  reg[_src],
            ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64))  as u64,
            ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64,

            // BPF_JMP class
            // TODO: check this actually works as expected for signed / unsigned ops
            // J-EQ, J-NE, J-GT, J-GE, J-LT, J-LE: unsigned
            // JS-GT, JS-GE, JS-LT, JS-LE: signed
            ebpf::JA         =>                                             do_jump(),
            ebpf::JEQ_IMM    => if  reg[_dst] == unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JEQ_REG    => if  reg[_dst] == reg[_src]                { do_jump(); },
            ebpf::JGT_IMM    => if  reg[_dst] >  unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JGT_REG    => if  reg[_dst] >  reg[_src]                { do_jump(); },
            ebpf::JGE_IMM    => if  reg[_dst] >= unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JGE_REG    => if  reg[_dst] >= reg[_src]                { do_jump(); },
            ebpf::JLT_IMM    => if  reg[_dst] <  unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JLT_REG    => if  reg[_dst] <  reg[_src]                { do_jump(); },
            ebpf::JLE_IMM    => if  reg[_dst] <= unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JLE_REG    => if  reg[_dst] <= reg[_src]                { do_jump(); },
            ebpf::JSET_IMM   => if  reg[_dst] &  insn.imm as u64 != 0     { do_jump(); },
            ebpf::JSET_REG   => if  reg[_dst] &  reg[_src]       != 0     { do_jump(); },
            ebpf::JNE_IMM    => if  reg[_dst] != unsigned_u64!(insn.imm)  { do_jump(); },
            ebpf::JNE_REG    => if  reg[_dst] != reg[_src]                { do_jump(); },
            ebpf::JSGT_IMM   => if  reg[_dst] as i64  >  insn.imm  as i64 { do_jump(); },
            ebpf::JSGT_REG   => if  reg[_dst] as i64  >  reg[_src] as i64 { do_jump(); },
            ebpf::JSGE_IMM   => if  reg[_dst] as i64  >= insn.imm  as i64 { do_jump(); },
            ebpf::JSGE_REG   => if  reg[_dst] as i64  >= reg[_src] as i64 { do_jump(); },
            ebpf::JSLT_IMM   => if (reg[_dst] as i64) <  insn.imm  as i64 { do_jump(); },
            ebpf::JSLT_REG   => if (reg[_dst] as i64) <  reg[_src] as i64 { do_jump(); },
            ebpf::JSLE_IMM   => if  reg[_dst] as i64  <= insn.imm  as i64 { do_jump(); },
            ebpf::JSLE_REG   => if  reg[_dst] as i64  <= reg[_src] as i64 { do_jump(); },

            // BPF_JMP32 class
            ebpf::JEQ_IMM32  => if  reg[_dst] as u32  == insn.imm  as u32      { do_jump(); },
            ebpf::JEQ_REG32  => if  reg[_dst] as u32  == reg[_src] as u32      { do_jump(); },
            ebpf::JGT_IMM32  => if  reg[_dst] as u32  >  insn.imm  as u32      { do_jump(); },
            ebpf::JGT_REG32  => if  reg[_dst] as u32  >  reg[_src] as u32      { do_jump(); },
            ebpf::JGE_IMM32  => if  reg[_dst] as u32  >= insn.imm  as u32      { do_jump(); },
            ebpf::JGE_REG32  => if  reg[_dst] as u32  >= reg[_src] as u32      { do_jump(); },
            ebpf::JLT_IMM32  => if (reg[_dst] as u32) <  insn.imm  as u32      { do_jump(); },
            ebpf::JLT_REG32  => if (reg[_dst] as u32) <  reg[_src] as u32      { do_jump(); },
            ebpf::JLE_IMM32  => if  reg[_dst] as u32  <= insn.imm  as u32      { do_jump(); },
            ebpf::JLE_REG32  => if  reg[_dst] as u32  <= reg[_src] as u32      { do_jump(); },
            ebpf::JSET_IMM32 => if  reg[_dst] as u32  &  insn.imm  as u32 != 0 { do_jump(); },
            ebpf::JSET_REG32 => if  reg[_dst] as u32  &  reg[_src] as u32 != 0 { do_jump(); },
            ebpf::JNE_IMM32  => if  reg[_dst] as u32  != insn.imm  as u32      { do_jump(); },
            ebpf::JNE_REG32  => if  reg[_dst] as u32  != reg[_src] as u32      { do_jump(); },
            ebpf::JSGT_IMM32 => if  reg[_dst] as i32  >  insn.imm              { do_jump(); },
            ebpf::JSGT_REG32 => if  reg[_dst] as i32  >  reg[_src] as i32      { do_jump(); },
            ebpf::JSGE_IMM32 => if  reg[_dst] as i32  >= insn.imm              { do_jump(); },
            ebpf::JSGE_REG32 => if  reg[_dst] as i32  >= reg[_src] as i32      { do_jump(); },
            ebpf::JSLT_IMM32 => if (reg[_dst] as i32) <  insn.imm              { do_jump(); },
            ebpf::JSLT_REG32 => if (reg[_dst] as i32) <  reg[_src] as i32      { do_jump(); },
            ebpf::JSLE_IMM32 => if  reg[_dst] as i32  <= insn.imm              { do_jump(); },
            ebpf::JSLE_REG32 => if  reg[_dst] as i32  <= reg[_src] as i32      { do_jump(); },

            // Do not delegate the check to the verifier, since registered functions can be
            // changed after the program has been verified.
            ebpf::CALL       => {
                match _src {
                    // Call helper function
                    0 => {
                        if let Some(function) = helpers.get(&(insn.imm as u32)) {
                            reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
                        } else {
                            Err(Error::new(ErrorKind::Other, format!("Error: unknown helper function (id: {:#x})", insn.imm as u32)))?;
                        }
                    }
                    // eBPF to eBPF call
                    1 => {
                        if stack_frame_idx >= MAX_CALL_DEPTH {
                            Err(Error::new(ErrorKind::Other, format!("Error: too many nested calls (max: {MAX_CALL_DEPTH})")))?;
                        }
                        stacks[stack_frame_idx].save_registers(&reg[6..=9]);
                        stacks[stack_frame_idx].save_return_address(insn_ptr);
                        reg[10] -= stacks[stack_frame_idx].get_stack_usage().stack_usage() as u64;
                        stack_frame_idx += 1;
                        insn_ptr += insn.imm as usize;
                    }
                    _ => {
                        Err(Error::new(ErrorKind::Other, format!("Error: unsupported call type #{} (insn #{})", _src, insn_ptr-1)))?;
                    }
                }
            }
            ebpf::TAIL_CALL  => unimplemented!(),
            ebpf::EXIT       => {
                if stack_frame_idx > 0 {
                    stack_frame_idx -= 1;
                    reg[6..=9].copy_from_slice(&stacks[stack_frame_idx].get_registers());
                    insn_ptr = stacks[stack_frame_idx].get_return_address();
                    reg[10] += stacks[stack_frame_idx].get_stack_usage().stack_usage() as u64;
                } else {
                    return Ok(reg[0]);
                }
            }

            _                => unreachable!()
        }
    }

    unreachable!()
}
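
For context, execute_program() above is the interpreter entry point that rbpf's VM wrappers delegate to. As a minimal usage sketch (assuming the crate's public EbpfVmNoData API and hand-assembled bytecode; this snippet is illustrative and not part of the change under review), a trivial program can be run like this:

// Illustrative only: runs "mov32 r0, 1; exit" through the interpreter.
fn main() {
    let prog = &[
        0xb4, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // mov32 r0, 1
        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
    ];
    // EbpfVmNoData::new() verifies the program; execute_program() runs the
    // interpreter loop shown above and returns the value left in r0 at EXIT.
    let vm = rbpf::EbpfVmNoData::new(Some(prog)).unwrap();
    assert_eq!(vm.execute_program().unwrap(), 0x1);
}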