• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

google / alioth / 18638570188

20 Oct 2025 12:38AM UTC coverage: 20.202% (-0.01%) from 20.213%
18638570188

Pull #308

github

web-flow
Merge 73a1640e9 into 416357998
Pull Request #308: Add tests for PciSegment

0 of 59 new or added lines in 5 files covered. (0.0%)

1163 existing lines in 25 files now uncovered.

1578 of 7811 relevant lines covered (20.2%)

19.85 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/alioth/src/hv/kvm/vm/vm.rs
1
// Copyright 2024 Google LLC
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14

15
#[cfg(target_arch = "aarch64")]
16
mod aarch64;
17
#[cfg(target_arch = "x86_64")]
18
mod x86_64;
19

20
use std::collections::HashMap;
21
use std::fmt::{self, Display, Formatter};
22
use std::io::ErrorKind;
23
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd};
24
use std::os::unix::thread::JoinHandleExt;
25
use std::sync::Arc;
26
use std::sync::atomic::{AtomicU32, Ordering};
27
use std::thread::JoinHandle;
28

29
use libc::{EFD_CLOEXEC, EFD_NONBLOCK, SIGRTMIN, eventfd, write};
30
use parking_lot::{Mutex, RwLock};
31
use snafu::ResultExt;
32

33
#[cfg(target_arch = "x86_64")]
34
use crate::arch::sev::{SevPolicy, SnpPageType, SnpPolicy};
35
use crate::ffi;
36
use crate::hv::kvm::vcpu::{KvmRunBlock, KvmVcpu};
37
use crate::hv::kvm::{KvmError, kvm_error};
38
use crate::hv::{
39
    Error, IoeventFd, IoeventFdRegistry, IrqFd, IrqSender, MemMapOption, MsiSender, Result, Vm,
40
    VmMemory, error,
41
};
42
#[cfg(target_arch = "x86_64")]
43
use crate::sys::kvm::KVM_IRQCHIP_IOAPIC;
44
#[cfg(target_arch = "aarch64")]
45
use crate::sys::kvm::KvmMsiFlag;
46
use crate::sys::kvm::{
47
    KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI, KvmCap, KvmEncRegion, KvmIoEventFd,
48
    KvmIoEventFdFlag, KvmIrqRouting, KvmIrqRoutingEntry, KvmIrqRoutingIrqchip, KvmIrqRoutingMsi,
49
    KvmIrqfd, KvmIrqfdFlag, KvmMemFlag, KvmMemoryAttribute, KvmMemoryAttributes, KvmMsi,
50
    KvmUserspaceMemoryRegion, KvmUserspaceMemoryRegion2, kvm_check_extension, kvm_create_vcpu,
51
    kvm_ioeventfd, kvm_irqfd, kvm_memory_encrypt_reg_region, kvm_memory_encrypt_unreg_region,
52
    kvm_set_gsi_routing, kvm_set_memory_attributes, kvm_set_user_memory_region,
53
    kvm_set_user_memory_region2, kvm_signal_msi,
54
};
55

56
#[cfg(target_arch = "x86_64")]
57
pub use self::x86_64::VmArch;
58

59
/// Shared state of a KVM virtual machine. Kept behind an `Arc` so vCPUs,
/// guest memory, and interrupt objects can all reference the same VM fd.
#[derive(Debug)]
pub(super) struct VmInner {
    /// The VM file descriptor.
    pub(super) fd: OwnedFd,
    /// Guest memfd backing private memory, if any; used with
    /// `kvm_set_user_memory_region2` in `KvmMemory::mem_map`.
    pub(super) memfd: Option<OwnedFd>,
    /// Registered ioeventfd requests, keyed by the eventfd's raw fd so
    /// `deregister` can replay them with the DEASSIGN flag.
    pub(super) ioeventfds: Mutex<HashMap<i32, KvmIoEventFd>>,
    /// MSI routing entries, keyed by GSI number.
    pub(super) msi_table: RwLock<HashMap<u32, KvmMsiEntryData>>,
    /// Next candidate GSI for MSI allocation (wraps; see
    /// `KvmMsiSender::create_irqfd`).
    pub(super) next_msi_gsi: AtomicU32,
    /// Bitmap of irqchip pins claimed by `KvmIrqSender` instances.
    pub(super) pin_map: AtomicU32,
    #[cfg(target_arch = "x86_64")]
    pub(super) arch: VmArch,
}
70

71
impl VmInner {
    /// Rebuilds the complete KVM GSI routing table and submits it in one
    /// call to `kvm_set_gsi_routing`.
    ///
    /// The table is the union of:
    /// - one irqchip route per pin set in `pin_map` (GSI identity-mapped
    ///   to the pin), and
    /// - one MSI route per unmasked entry in `table`.
    fn update_routing_table(&self, table: &HashMap<u32, KvmMsiEntryData>) -> Result<(), KvmError> {
        let mut entries = [KvmIrqRoutingEntry::default(); MAX_GSI_ROUTES];
        let mut index = 0;
        let pin_map = self.pin_map.load(Ordering::Acquire);
        // x86_64 routes pins to the IOAPIC (24 pins); aarch64 uses
        // irqchip 0 with 32 pins.
        #[cfg(target_arch = "x86_64")]
        let (irqchip, max_pin) = (KVM_IRQCHIP_IOAPIC, 24);
        #[cfg(target_arch = "aarch64")]
        let (irqchip, max_pin) = (0, 32);
        for pin in 0..max_pin {
            if pin_map & (1 << pin) == 0 {
                continue;
            }
            // Identity-map GSI -> irqchip pin.
            entries[index].gsi = pin;
            entries[index].type_ = KVM_IRQ_ROUTING_IRQCHIP;
            entries[index].routing.irqchip = KvmIrqRoutingIrqchip { irqchip, pin };
            index += 1;
        }
        for (gsi, entry) in table.iter() {
            // Masked MSIs are omitted from the routing table entirely.
            if entry.masked {
                continue;
            }
            // NOTE(review): `entries` has MAX_GSI_ROUTES (256) slots. On
            // aarch64 up to 32 pin routes plus up to 232 MSI routes could
            // exceed that and the indexing below would panic — TODO confirm
            // the allocators keep the total within bounds.
            entries[index].gsi = *gsi;
            entries[index].type_ = KVM_IRQ_ROUTING_MSI;
            #[cfg(target_arch = "aarch64")]
            {
                entries[index].flags = KvmMsiFlag::VALID_DEVID;
            }
            entries[index].routing.msi = KvmIrqRoutingMsi {
                address_hi: entry.addr_hi,
                address_lo: entry.addr_lo,
                data: entry.data,
                #[cfg(target_arch = "aarch64")]
                devid: entry.devid,
                #[cfg(not(target_arch = "aarch64"))]
                devid: 0,
            };
            index += 1;
        }
        let irq_routing = KvmIrqRouting {
            // Only the first `index` entries are meaningful.
            nr: index as u32,
            _flags: 0,
            entries,
        };
        log::trace!("{self}: updating GSI routing table to {irq_routing:#x?}");
        unsafe { kvm_set_gsi_routing(&self.fd, &irq_routing) }.context(kvm_error::GsiRouting)?;
        Ok(())
    }

    /// Queries capability `id` on this VM, returning the raw integer the
    /// kernel reports; errors are mapped to a missing
    /// KVM_CAP_CHECK_EXTENSION_VM capability.
    pub fn check_extension(&self, id: KvmCap) -> Result<i32, Error> {
        let ret = unsafe { kvm_check_extension(&self.fd, id) };
        match ret {
            Ok(num) => Ok(num),
            Err(_) => error::Capability {
                cap: "KVM_CAP_CHECK_EXTENSION_VM",
            }
            .fail(),
        }
    }
}
131

132
impl Display for VmInner {
UNCOV
133
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
UNCOV
134
        write!(f, "kvm-{}", self.fd.as_raw_fd())
135
    }
136
}
137

138
/// Handle to a KVM virtual machine, implementing the generic `Vm` trait.
pub struct KvmVm {
    /// Shared VM state, also referenced by vCPUs and device objects.
    pub(super) vm: Arc<VmInner>,
    /// Size used to mmap each vCPU's `KvmRunBlock` (see `create_vcpu`).
    pub(super) vcpu_mmap_size: usize,
    /// Guards `create_vm_memory` so guest memory is only created once.
    pub(super) memory_created: bool,
}
143

144
/// `(next free slot id, map from (gpa, size) to the KVM slot backing that range)`.
type MemSlots = (u32, HashMap<(u64, u64), u32>);
145

146
/// Guest memory registry for a KVM VM: tracks which memory slot backs each
/// mapped `(gpa, size)` range.
#[derive(Debug)]
pub struct KvmMemory {
    /// Next free slot id plus the active mappings; see `MemSlots`.
    slots: Mutex<MemSlots>,
    vm: Arc<VmInner>,
}
151

152
impl KvmMemory {
UNCOV
153
    pub(super) fn new(vm: Arc<VmInner>) -> Self {
154
        KvmMemory {
UNCOV
155
            slots: Mutex::new((0, HashMap::new())),
156
            vm,
157
        }
158
    }
159

UNCOV
160
    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<()> {
UNCOV
161
        let flags = KvmMemFlag::empty();
162
        let region = KvmUserspaceMemoryRegion {
163
            slot,
164
            guest_phys_addr: gpa,
165
            memory_size: 0,
166
            userspace_addr: 0,
167
            flags,
168
        };
UNCOV
169
        unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
UNCOV
170
            .context(error::GuestUnmap { gpa, size })?;
UNCOV
171
        log::trace!(
UNCOV
172
            "{}: slot-{slot}: unmapped: {gpa:#018x}, size={size:#x}",
173
            self.vm
174
        );
UNCOV
175
        Ok(())
176
    }
177
}
178

179
impl VmMemory for KvmMemory {
    /// Maps host memory `[hva, hva + size)` into the guest at `gpa` by
    /// creating a new KVM memory slot.
    ///
    /// Rejects options lacking read or exec permission. When the VM has a
    /// guest memfd, the slot is created via `kvm_set_user_memory_region2`
    /// backed by that memfd; otherwise the legacy ioctl is used.
    fn mem_map(&self, gpa: u64, size: u64, hva: usize, option: MemMapOption) -> Result<(), Error> {
        let mut flags = KvmMemFlag::empty();
        if !option.read || !option.exec {
            return kvm_error::MmapOption { option }.fail()?;
        }
        if !option.write {
            flags |= KvmMemFlag::READONLY;
        }
        if option.log_dirty {
            flags |= KvmMemFlag::LOG_DIRTY_PAGES;
        }
        let (slot_id, slots) = &mut *self.slots.lock();
        if let Some(memfd) = &self.vm.memfd {
            flags |= KvmMemFlag::GUEST_MEMFD;
            let region = KvmUserspaceMemoryRegion2 {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
                guest_memfd: memfd.as_raw_fd() as _,
                // The memfd offset is the gpa itself — assumes the guest
                // memfd mirrors the guest physical address space.
                guest_memfd_offset: gpa,
                ..Default::default()
            };
            unsafe { kvm_set_user_memory_region2(&self.vm.fd, &region) }
        } else {
            let region = KvmUserspaceMemoryRegion {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
            };
            unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
        }
        .context(error::GuestMap { hva, gpa, size })?;
        // Remember which slot serves this (gpa, size) so unmap() can find it.
        slots.insert((gpa, size), *slot_id);
        log::trace!(
            "{}: slot-{slot_id}: mapped: {gpa:#018x} -> {hva:#018x}, size = {size:#x}",
            self.vm
        );
        *slot_id += 1;
        Ok(())
    }

    /// Unmaps a range previously mapped with exactly this `(gpa, size)`
    /// pair; fails with NotFound otherwise.
    fn unmap(&self, gpa: u64, size: u64) -> Result<(), Error> {
        let (_, slots) = &mut *self.slots.lock();
        let Some(slot) = slots.remove(&(gpa, size)) else {
            return Err(ErrorKind::NotFound.into()).context(error::GuestUnmap { gpa, size });
        };
        // Resolves to the inherent 3-argument KvmMemory::unmap above.
        self.unmap(slot, gpa, size)
    }

    /// Registers `range` as encrypted guest memory.
    fn register_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_reg_region(&self.vm.fd, &region) }
            .context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Reverses `register_encrypted_range` for `range`.
    fn deregister_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_unreg_region(&self.vm.fd, &region) }
            .context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Marks `[gpa, gpa + size)` private (or shared when `private` is
    /// false) via KVM memory attributes.
    fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let attr = KvmMemoryAttributes {
            address: gpa,
            size,
            attributes: if private {
                KvmMemoryAttribute::PRIVATE
            } else {
                KvmMemoryAttribute::empty()
            },
            flags: 0,
        };
        unsafe { kvm_set_memory_attributes(&self.vm.fd, &attr) }.context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Removes every memory slot and resets slot-id allocation to 0.
    fn reset(&self) -> Result<()> {
        let (slot_id, slots) = &mut *self.slots.lock();
        for ((gpa, size), slot) in slots.drain() {
            self.unmap(slot, gpa, size)?;
        }
        *slot_id = 0;
        Ok(())
    }
}
277

278
/// Triggers a fixed irqchip pin through an eventfd-backed irqfd.
#[derive(Debug)]
pub struct KvmIrqSender {
    /// The irqchip pin (used as the GSI) this sender drives.
    pin: u8,
    vm: Arc<VmInner>,
    /// Writing a value to this eventfd injects the interrupt (see `send`).
    event_fd: OwnedFd,
}
284

285
impl Drop for KvmIrqSender {
    /// Releases the pin in the VM's pin bitmap and de-assigns the irqfd
    /// from KVM. Failures are only logged since drop cannot propagate
    /// errors.
    fn drop(&mut self) {
        let pin_flag = 1 << (self.pin as u32);
        self.vm.pin_map.fetch_and(!pin_flag, Ordering::AcqRel);
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.pin as u32,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        if let Err(e) = unsafe { kvm_irqfd(&self.vm.fd, &request) } {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
304

305
impl IrqSender for KvmIrqSender {
UNCOV
306
    fn send(&self) -> Result<(), Error> {
UNCOV
307
        ffi!(unsafe { write(self.event_fd.as_raw_fd(), &1u64 as *const _ as _, 8) })
UNCOV
308
            .context(error::SendInterrupt)?;
UNCOV
309
        Ok(())
310
    }
311
}
312

313
/// Software copy of one MSI routing entry, keyed by GSI in
/// `VmInner::msi_table`.
#[derive(Debug, Default)]
pub(crate) struct KvmMsiEntryData {
    /// Low 32 bits of the MSI address.
    addr_lo: u32,
    /// High 32 bits of the MSI address.
    addr_hi: u32,
    /// MSI data payload.
    data: u32,
    /// Masked entries are excluded from the KVM routing table.
    masked: bool,
    /// Set when addr/data changed while masked; the routing table is
    /// refreshed on unmask (see `impl_irqfd_method!` and `set_masked`).
    dirty: bool,
    #[cfg(target_arch = "aarch64")]
    /// Device id carried in the route with the VALID_DEVID flag.
    devid: u32,
}
323

324
/// An eventfd bound to a GSI allocated from the VM's MSI table; KVM
/// injects the routed MSI when the eventfd is written and the irqfd is
/// assigned (see `assign_irqfd`).
#[derive(Debug)]
pub struct KvmIrqFd {
    event_fd: OwnedFd,
    vm: Arc<VmInner>,
    /// GSI allocated for this irqfd by `KvmMsiSender::create_irqfd`.
    gsi: u32,
}
330

331
impl Drop for KvmIrqFd {
    /// Removes this GSI from the VM's MSI table and, if the entry was
    /// active (unmasked), de-assigns the irqfd from KVM. Errors are only
    /// logged because drop cannot fail.
    fn drop(&mut self) {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.remove(&self.gsi) else {
            log::error!(
                "{}: cannot find gsi {:#x} in the gsi table",
                self.vm,
                self.gsi,
            );
            return;
        };
        // A masked entry is not assigned to KVM (see set_masked), so
        // there is nothing to de-assign.
        if entry.masked {
            return;
        }
        if let Err(e) = self.deassign_irqfd() {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
354

355
impl AsFd for KvmIrqFd {
UNCOV
356
    fn as_fd(&self) -> BorrowedFd<'_> {
UNCOV
357
        self.event_fd.as_fd()
358
    }
359
}
360

361
impl KvmIrqFd {
    /// Binds this eventfd to its GSI via KVM_IRQFD so writes to the
    /// eventfd inject the routed interrupt.
    fn assign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }

    /// Undoes `assign_irqfd` by repeating the request with the DEASSIGN
    /// flag set.
    fn deassign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: de-assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }
}
395

396
// Generates a getter/setter pair for one 32-bit field of the MSI table
// entry backing a KvmIrqFd (addr_lo, addr_hi, or data).
//
// The setter defers the KVM routing-table rebuild while the entry is
// masked, only marking it dirty; set_masked flushes the change on unmask.
macro_rules! impl_irqfd_method {
    ($field:ident, $get:ident, $set:ident) => {
        fn $get(&self) -> u32 {
            let table = self.vm.msi_table.read();
            let Some(entry) = table.get(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            entry.$field
        }
        fn $set(&self, val: u32) -> Result<()> {
            let mut table = self.vm.msi_table.write();
            let Some(entry) = table.get_mut(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            // No-op writes skip the routing-table rebuild entirely.
            if entry.$field == val {
                return Ok(());
            }
            entry.$field = val;

            if !entry.masked {
                self.vm.update_routing_table(&table)?;
            } else {
                entry.dirty = true;
            }
            Ok(())
        }
    };
}
424

425
impl IrqFd for KvmIrqFd {
    impl_irqfd_method!(addr_lo, get_addr_lo, set_addr_lo);

    impl_irqfd_method!(addr_hi, get_addr_hi, set_addr_hi);

    impl_irqfd_method!(data, get_data, set_data);

    /// Returns whether this GSI's MSI entry is currently masked.
    fn get_masked(&self) -> bool {
        let table = self.vm.msi_table.read();
        let Some(entry) = table.get(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        entry.masked
    }

    /// Masks or unmasks the MSI; returns `Ok(false)` when the state did
    /// not change. On unmask, a deferred routing update (`entry.dirty`)
    /// is flushed before the irqfd is assigned; on mask, the irqfd is
    /// de-assigned.
    fn set_masked(&self, val: bool) -> Result<bool> {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.get_mut(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        if entry.masked == val {
            return Ok(false);
        }
        entry.masked = val;
        if !val {
            if entry.dirty {
                self.vm.update_routing_table(&table)?;
            }
            self.assign_irqfd()?;
        } else {
            self.deassign_irqfd()?;
        }
        Ok(true)
    }
}
460

461
/// Capacity of the fixed-size routing table built in `update_routing_table`.
const MAX_GSI_ROUTES: usize = 256;
462

463
/// Sends MSIs directly (KVM_SIGNAL_MSI) and creates eventfd-backed
/// irqfds with GSIs allocated from the VM's MSI table.
#[derive(Debug)]
pub struct KvmMsiSender {
    vm: Arc<VmInner>,
    #[cfg(target_arch = "aarch64")]
    /// Device id stamped on every MSI and routing entry (VALID_DEVID).
    devid: u32,
}
469

470
impl MsiSender for KvmMsiSender {
    type IrqFd = KvmIrqFd;

    /// Injects one MSI directly via KVM_SIGNAL_MSI, bypassing the
    /// routing table.
    fn send(&self, addr: u64, data: u32) -> Result<()> {
        let kvm_msi = KvmMsi {
            address_lo: addr as u32,
            address_hi: (addr >> 32) as u32,
            data,
            #[cfg(target_arch = "aarch64")]
            devid: self.devid,
            #[cfg(target_arch = "aarch64")]
            flags: KvmMsiFlag::VALID_DEVID,
            ..Default::default()
        };
        unsafe { kvm_signal_msi(&self.vm.fd, &kvm_msi) }.context(error::SendInterrupt)?;
        Ok(())
    }

    /// Creates a new eventfd and allocates a free GSI for it from the
    /// range `[24, MAX_GSI_ROUTES)` (GSIs below 24 are reserved for
    /// irqchip pins). The new entry starts masked.
    fn create_irqfd(&self) -> Result<Self::IrqFd> {
        let event_fd = unsafe {
            OwnedFd::from_raw_fd(
                ffi!(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)).context(error::IrqFd)?,
            )
        };
        let mut table = self.vm.msi_table.write();
        let mut allocated_gsi = None;
        // Scan at most one full cycle of the candidate range.
        for _ in 0..(MAX_GSI_ROUTES - 24) {
            let gsi = self.vm.next_msi_gsi.fetch_add(1, Ordering::AcqRel)
                % (MAX_GSI_ROUTES as u32 - 24)
                + 24;
            let new_entry = KvmMsiEntryData {
                #[cfg(target_arch = "aarch64")]
                devid: self.devid,
                masked: true,
                ..Default::default()
            };
            if let Some(e) = table.insert(gsi, new_entry) {
                // GSI already in use: restore the old entry and try the next.
                table.insert(gsi, e);
            } else {
                allocated_gsi = Some(gsi);
                break;
            }
        }
        let Some(gsi) = allocated_gsi else {
            return kvm_error::AllocateGsi.fail()?;
        };
        log::debug!(
            "{}: allocated: gsi {gsi:#x} -> irqfd {:#x}",
            self.vm,
            event_fd.as_raw_fd()
        );
        let entry = KvmIrqFd {
            vm: self.vm.clone(),
            event_fd,
            gsi,
        };
        Ok(entry)
    }
}
529

530
/// An eventfd registered (or registrable) as a KVM ioeventfd.
#[derive(Debug)]
pub struct KvmIoeventFd {
    fd: OwnedFd,
}
534

535
impl AsFd for KvmIoeventFd {
UNCOV
536
    fn as_fd(&self) -> BorrowedFd<'_> {
UNCOV
537
        self.fd.as_fd()
538
    }
539
}
540

541
// Marker impl: the hv IoeventFd interface needs no methods beyond AsFd.
impl IoeventFd for KvmIoeventFd {}
542

543
/// Creates and (de)registers ioeventfds for a KVM VM.
#[derive(Debug)]
pub struct KvmIoeventFdRegistry {
    vm: Arc<VmInner>,
}
547

548
impl IoeventFdRegistry for KvmIoeventFdRegistry {
    type IoeventFd = KvmIoeventFd;

    /// Creates a fresh non-blocking, close-on-exec eventfd.
    fn create(&self) -> Result<Self::IoeventFd> {
        let fd =
            ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) }).context(error::IoeventFd)?;
        Ok(KvmIoeventFd {
            // SAFETY: fd was just created above and is owned only here.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
        })
    }

    /// Registers `fd` to be signaled on guest writes at `gpa` of length
    /// `len`; when `data` is given, only writes matching that value
    /// trigger the eventfd (datamatch).
    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()> {
        let mut request = KvmIoEventFd {
            addr: gpa,
            len: len as u32,
            fd: fd.as_fd().as_raw_fd(),
            ..Default::default()
        };
        if let Some(data) = data {
            request.datamatch = data;
            request.flags |= KvmIoEventFdFlag::DATA_MATCH;
        }
        unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        // Keep the request so deregister() can replay it with DEASSIGN.
        let mut fds = self.vm.ioeventfds.lock();
        fds.insert(request.fd, request);
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    fn register_port(
        &self,
        _fd: &Self::IoeventFd,
        _port: u16,
        _len: u8,
        _data: Option<u64>,
    ) -> Result<()> {
        // Port-IO ioeventfds are not implemented.
        unimplemented!()
    }

    /// De-registers `fd` if it was registered; unknown fds are silently
    /// ignored.
    fn deregister(&self, fd: &Self::IoeventFd) -> Result<()> {
        let mut fds = self.vm.ioeventfds.lock();
        if let Some(mut request) = fds.remove(&fd.as_fd().as_raw_fd()) {
            request.flags |= KvmIoEventFdFlag::DEASSIGN;
            unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        }
        Ok(())
    }
}
596

597
impl Vm for KvmVm {
    #[cfg(target_arch = "aarch64")]
    type GicV2 = aarch64::KvmGicV2;
    #[cfg(target_arch = "aarch64")]
    type GicV2m = aarch64::KvmGicV2m;
    #[cfg(target_arch = "aarch64")]
    type GicV3 = aarch64::KvmGicV3;
    type IoeventFdRegistry = KvmIoeventFdRegistry;
    type IrqSender = KvmIrqSender;
    #[cfg(target_arch = "aarch64")]
    type Its = aarch64::KvmIts;
    type Memory = KvmMemory;
    type MsiSender = KvmMsiSender;
    type Vcpu = KvmVcpu;

    /// Creates vCPU `id` and mmaps its kvm_run block.
    fn create_vcpu(&self, id: u32) -> Result<Self::Vcpu, Error> {
        let vcpu_fd = unsafe { kvm_create_vcpu(&self.vm.fd, id) }.context(error::CreateVcpu)?;
        let kvm_run = unsafe { KvmRunBlock::new(vcpu_fd, self.vcpu_mmap_size) }?;
        Ok(KvmVcpu {
            // SAFETY: vcpu_fd was just returned by KVM and is owned only here.
            fd: unsafe { OwnedFd::from_raw_fd(vcpu_fd) },
            kvm_run,
            vm: self.vm.clone(),
            io_index: 0,
        })
    }

    /// Interrupts the vCPU thread with SIGRTMIN to kick it out of the
    /// kernel.
    fn stop_vcpu<T>(&self, _id: u32, handle: &JoinHandle<T>) -> Result<(), Error> {
        ffi!(unsafe { libc::pthread_kill(handle.as_pthread_t() as _, SIGRTMIN()) })
            .context(error::StopVcpu)?;
        Ok(())
    }

    /// Creates the VM's memory registry; may only be called once per VM.
    fn create_vm_memory(&mut self) -> Result<Self::Memory, Error> {
        if self.memory_created {
            error::MemoryCreated.fail()
        } else {
            let kvm_memory = KvmMemory::new(self.vm.clone());
            self.memory_created = true;
            Ok(kvm_memory)
        }
    }

    /// Claims irqchip pin `pin` and wires a new eventfd to it.
    ///
    /// Fails if the pin is already claimed or KVM_CAP_IRQFD is missing.
    /// The routing table is refreshed before KVM_IRQFD so the pin's
    /// route exists when the irqfd is assigned.
    fn create_irq_sender(&self, pin: u8) -> Result<Self::IrqSender, Error> {
        let pin_flag = 1 << pin;
        // fetch_or doubles as the atomic claim check.
        if self.vm.pin_map.fetch_or(pin_flag, Ordering::AcqRel) & pin_flag == pin_flag {
            return Err(std::io::ErrorKind::AlreadyExists.into()).context(error::CreateIrq { pin });
        }
        if self.vm.check_extension(KvmCap::IRQFD)? == 0 {
            return error::Capability {
                cap: "KVM_CAP_IRQFD",
            }
            .fail();
        }
        let event_fd = ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) })
            .context(error::CreateIrq { pin })?;
        let request = KvmIrqfd {
            fd: event_fd as u32,
            gsi: pin as u32,
            ..Default::default()
        };
        self.vm.update_routing_table(&self.vm.msi_table.read())?;
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::CreateIrq { pin })?;
        Ok(KvmIrqSender {
            pin,
            vm: self.vm.clone(),
            // SAFETY: event_fd was created above and is owned only here.
            event_fd: unsafe { OwnedFd::from_raw_fd(event_fd) },
        })
    }

    /// Creates an MSI sender; requires KVM_CAP_SIGNAL_MSI.
    fn create_msi_sender(
        &self,
        #[cfg(target_arch = "aarch64")] devid: u32,
    ) -> Result<Self::MsiSender> {
        if self.vm.check_extension(KvmCap::SIGNAL_MSI)? == 0 {
            return error::Capability {
                cap: "KVM_CAP_SIGNAL_MSI",
            }
            .fail();
        }
        Ok(KvmMsiSender {
            vm: self.vm.clone(),
            #[cfg(target_arch = "aarch64")]
            devid,
        })
    }

    /// Creates the ioeventfd registry for this VM.
    fn create_ioeventfd_registry(&self) -> Result<Self::IoeventFdRegistry> {
        Ok(KvmIoeventFdRegistry {
            vm: self.vm.clone(),
        })
    }

    // SEV/SNP launch flow: thin delegations to the kvm_* helpers
    // (implemented outside this view, presumably in the x86_64 module).

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_start(&self, policy: SevPolicy) -> Result<(), Error> {
        self.kvm_sev_launch_start(policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_data(&self, range: &mut [u8]) -> Result<(), Error> {
        self.kvm_sev_launch_update_data(range)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_vmsa(&self) -> Result<(), Error> {
        self.kvm_sev_launch_update_vmsa()
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_measure(&self) -> Result<Vec<u8>, Error> {
        self.kvm_sev_launch_measure()
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_finish(&self) -> Result<(), Error> {
        self.kvm_sev_launch_finish()
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_start(&self, policy: SnpPolicy) -> Result<()> {
        self.kvm_snp_launch_start(policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_update(&self, range: &mut [u8], gpa: u64, type_: SnpPageType) -> Result<()> {
        self.kvm_snp_launch_update(range, gpa, type_)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_finish(&self) -> Result<()> {
        self.kvm_snp_launch_finish()
    }

    // GIC/ITS creation: delegated to the aarch64 helpers; GICv2m is
    // reported as unsupported.

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2(&self, distributor_base: u64, cpu_interface_base: u64) -> Result<Self::GicV2> {
        self.kvm_create_gic_v2(distributor_base, cpu_interface_base)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2m(&self, _base: u64) -> Result<Self::GicV2m> {
        Err(std::io::ErrorKind::Unsupported.into()).context(error::CreateDevice)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v3(
        &self,
        distributor_base: u64,
        redistributor_base: u64,
        redistributor_count: u32,
    ) -> Result<Self::GicV3> {
        self.kvm_create_gic_v3(distributor_base, redistributor_base, redistributor_count)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_its(&self, base: u64) -> Result<Self::Its> {
        self.kvm_create_its(base)
    }
}
754

755
#[cfg(test)]
mod test {
    use std::ptr::null_mut;

    use assert_matches::assert_matches;
    use libc::{MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_EXEC, PROT_READ, PROT_WRITE, mmap};

    use super::*;
    use crate::ffi;
    use crate::hv::kvm::KvmConfig;
    use crate::hv::{Hypervisor, Kvm, MemMapOption, VmConfig};

    /// mem_map() must reject options missing read or exec permission and
    /// accept a read+exec (write-protected) mapping.
    #[test]
    #[cfg_attr(not(feature = "test-hv"), ignore)]
    fn test_mem_map() {
        let kvm = Kvm::new(KvmConfig::default()).unwrap();
        let vm_config = VmConfig { coco: None };
        let mut vm = kvm.create_vm(&vm_config).unwrap();
        let vm_memory = vm.create_vm_memory().unwrap();

        // One anonymous host page to back the guest mapping.
        let prot = PROT_WRITE | PROT_READ | PROT_EXEC;
        let flag = MAP_ANONYMOUS | MAP_PRIVATE;
        let user_mem = ffi!(
            unsafe { mmap(null_mut(), 0x1000, prot, flag, -1, 0) },
            MAP_FAILED
        )
        .unwrap();
        // Missing read permission is rejected.
        let option_no_read = MemMapOption {
            read: false,
            write: true,
            exec: true,
            log_dirty: true,
        };
        assert_matches!(
            vm_memory.mem_map(0x0, 0x1000, user_mem as usize, option_no_read),
            Err(Error::KvmErr { .. })
        );
        // Missing exec permission is rejected as well. (Previously this
        // case duplicated the no-read option and the !exec branch was
        // never exercised.)
        let option_no_exec = MemMapOption {
            read: true,
            write: true,
            exec: false,
            log_dirty: true,
        };
        assert_matches!(
            vm_memory.mem_map(0x0, 0x1000, user_mem as usize, option_no_exec),
            Err(Error::KvmErr { .. })
        );
        // Read + exec without write maps successfully as a read-only slot.
        let option = MemMapOption {
            read: true,
            write: false,
            exec: true,
            log_dirty: true,
        };
        vm_memory
            .mem_map(0x0, 0x1000, user_mem as usize, option)
            .unwrap();
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc