• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

google / alioth / 17385983897

01 Sep 2025 07:41PM UTC coverage: 18.009% (-0.1%) from 18.149%
17385983897

Pull #281

github

web-flow
Merge 782c57b11 into 6ec9a6d6b
Pull Request #281: Port to Apple Hypervisor framework

0 of 154 new or added lines in 11 files covered. (0.0%)

1166 existing lines in 29 files now uncovered.

1362 of 7563 relevant lines covered (18.01%)

18.77 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/alioth/src/hv/kvm/vm/vm.rs
1
// Copyright 2024 Google LLC
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14

15
#[cfg(target_arch = "aarch64")]
16
mod aarch64;
17
#[cfg(target_arch = "x86_64")]
18
mod x86_64;
19

20
use std::collections::HashMap;
21
use std::fmt::{self, Display, Formatter};
22
use std::io::ErrorKind;
23
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd};
24
use std::os::unix::thread::JoinHandleExt;
25
use std::sync::Arc;
26
use std::sync::atomic::{AtomicU32, Ordering};
27
use std::thread::JoinHandle;
28

29
use libc::{EFD_CLOEXEC, EFD_NONBLOCK, SIGRTMIN, eventfd, write};
30
use parking_lot::{Mutex, RwLock};
31
use snafu::ResultExt;
32

33
#[cfg(target_arch = "x86_64")]
34
use crate::arch::sev::{SnpPageType, SnpPolicy};
35
use crate::ffi;
36
#[cfg(target_arch = "x86_64")]
37
use crate::hv::kvm::bindings::KVM_IRQCHIP_IOAPIC;
38
#[cfg(target_arch = "aarch64")]
39
use crate::hv::kvm::bindings::KvmMsiFlag;
40
use crate::hv::kvm::bindings::{
41
    KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI, KvmCap, KvmEncRegion, KvmIoEventFd,
42
    KvmIoEventFdFlag, KvmIrqRouting, KvmIrqRoutingEntry, KvmIrqRoutingIrqchip, KvmIrqRoutingMsi,
43
    KvmIrqfd, KvmIrqfdFlag, KvmMemFlag, KvmMemoryAttribute, KvmMemoryAttributes, KvmMsi,
44
    KvmUserspaceMemoryRegion, KvmUserspaceMemoryRegion2,
45
};
46
use crate::hv::kvm::ioctls::{
47
    kvm_check_extension, kvm_create_vcpu, kvm_ioeventfd, kvm_irqfd, kvm_memory_encrypt_reg_region,
48
    kvm_memory_encrypt_unreg_region, kvm_set_gsi_routing, kvm_set_memory_attributes,
49
    kvm_set_user_memory_region, kvm_set_user_memory_region2, kvm_signal_msi,
50
};
51
use crate::hv::kvm::vcpu::{KvmRunBlock, KvmVcpu};
52
use crate::hv::kvm::{KvmError, kvm_error};
53
use crate::hv::{
54
    Error, IoeventFd, IoeventFdRegistry, IrqFd, IrqSender, MemMapOption, MsiSender, Result, Vm,
55
    VmMemory, error,
56
};
57

58
#[cfg(target_arch = "x86_64")]
59
pub use self::x86_64::VmArch;
60

61
/// State shared between the VM handle and every object derived from it
/// (vCPUs, memory, irq senders, irqfds), held behind an `Arc`.
#[derive(Debug)]
pub(super) struct VmInner {
    /// The KVM VM file descriptor all ioctls are issued against.
    pub(super) fd: OwnedFd,
    /// Guest memfd backing guest memory when present; its presence selects
    /// `KVM_SET_USER_MEMORY_REGION2` in `mem_map`.
    pub(super) memfd: Option<OwnedFd>,
    /// Registered ioeventfd requests, keyed by raw eventfd number, kept so
    /// `deregister` can replay them with the DEASSIGN flag.
    pub(super) ioeventfds: Mutex<HashMap<i32, KvmIoEventFd>>,
    /// Per-GSI MSI route state mirrored into the kernel routing table.
    pub(super) msi_table: RwLock<HashMap<u32, KvmMsiEntryData>>,
    /// Next candidate GSI for MSI allocation (wraps within the MSI range).
    pub(super) next_msi_gsi: AtomicU32,
    /// Bitmap of irqchip pins currently claimed by `KvmIrqSender`s.
    pub(super) pin_map: AtomicU32,
    #[cfg(target_arch = "x86_64")]
    pub(super) arch: VmArch,
}
72

73
impl VmInner {
    /// Rebuilds the full KVM GSI routing table — one irqchip entry per pin
    /// claimed in `pin_map`, plus one MSI entry per unmasked entry in
    /// `table` — and installs it with `KVM_SET_GSI_ROUTING`.
    fn update_routing_table(&self, table: &HashMap<u32, KvmMsiEntryData>) -> Result<(), KvmError> {
        let mut entries = [KvmIrqRoutingEntry::default(); MAX_GSI_ROUTES];
        let mut index = 0;
        let pin_map = self.pin_map.load(Ordering::Acquire);
        // x86_64 routes legacy pins through the IOAPIC (24 pins);
        // aarch64 uses irqchip 0 with 32 pins.
        #[cfg(target_arch = "x86_64")]
        let (irqchip, max_pin) = (KVM_IRQCHIP_IOAPIC, 24);
        #[cfg(target_arch = "aarch64")]
        let (irqchip, max_pin) = (0, 32);
        // One irqchip routing entry per claimed pin; pin number doubles as GSI.
        for pin in 0..max_pin {
            if pin_map & (1 << pin) == 0 {
                continue;
            }
            entries[index].gsi = pin;
            entries[index].type_ = KVM_IRQ_ROUTING_IRQCHIP;
            entries[index].routing.irqchip = KvmIrqRoutingIrqchip { irqchip, pin };
            index += 1;
        }
        // Masked MSI entries are deliberately left out of the table so the
        // kernel drops those interrupts until they are unmasked.
        for (gsi, entry) in table.iter() {
            if entry.masked {
                continue;
            }
            entries[index].gsi = *gsi;
            entries[index].type_ = KVM_IRQ_ROUTING_MSI;
            #[cfg(target_arch = "aarch64")]
            {
                // aarch64 MSI routes carry a device id, flagged as valid here.
                entries[index].flags = KvmMsiFlag::VALID_DEVID;
            }
            entries[index].routing.msi = KvmIrqRoutingMsi {
                address_hi: entry.addr_hi,
                address_lo: entry.addr_lo,
                data: entry.data,
                #[cfg(target_arch = "aarch64")]
                devid: entry.devid,
                #[cfg(not(target_arch = "aarch64"))]
                devid: 0,
            };
            index += 1;
        }
        let irq_routing = KvmIrqRouting {
            nr: index as u32,
            _flags: 0,
            entries,
        };
        log::trace!("{self}: updating GSI routing table to {irq_routing:#x?}");
        // SAFETY: `self.fd` is a valid KVM VM fd and `irq_routing` is fully
        // initialized with `nr` valid entries.
        unsafe { kvm_set_gsi_routing(&self.fd, &irq_routing) }.context(kvm_error::GsiRouting)?;
        Ok(())
    }

    /// Queries `KVM_CHECK_EXTENSION` on the VM fd for capability `id` and
    /// returns the kernel-reported value (0 means unsupported). The ioctl
    /// failing at all is reported as a missing KVM_CAP_CHECK_EXTENSION_VM
    /// capability.
    pub fn check_extension(&self, id: KvmCap) -> Result<i32, Error> {
        let ret = unsafe { kvm_check_extension(&self.fd, id) };
        match ret {
            Ok(num) => Ok(num),
            Err(_) => error::Capability {
                cap: "KVM_CAP_CHECK_EXTENSION_VM",
            }
            .fail(),
        }
    }
}
133

134
impl Display for VmInner {
UNCOV
135
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
UNCOV
136
        write!(f, "kvm-{}", self.fd.as_raw_fd())
137
    }
138
}
139

140
/// A KVM-backed virtual machine implementing the `Vm` trait.
pub struct KvmVm {
    pub(super) vm: Arc<VmInner>,
    /// Size used to mmap each vCPU's `KvmRunBlock` in `create_vcpu`.
    pub(super) vcpu_mmap_size: usize,
    /// Guards against creating the VM memory object more than once.
    pub(super) memory_created: bool,
}
145

146
// (next free slot id, map from (gpa, size) to the slot id backing that mapping)
type MemSlots = (u32, HashMap<(u64, u64), u32>);

/// Guest memory manager implementing `VmMemory` via KVM memory slots.
#[derive(Debug)]
pub struct KvmMemory {
    /// Slot allocation state; the slot-id counter only ever grows until
    /// `reset` sets it back to 0.
    slots: Mutex<MemSlots>,
    vm: Arc<VmInner>,
}
153

154
impl KvmMemory {
    /// Creates an empty memory manager for `vm`, starting at slot id 0.
    pub(super) fn new(vm: Arc<VmInner>) -> Self {
        KvmMemory {
            slots: Mutex::new((0, HashMap::new())),
            vm,
        }
    }

    /// Deletes memory slot `slot` by installing a zero-sized region over it
    /// (KVM's deletion convention). `gpa` and `size` are used for error
    /// context and logging only.
    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<()> {
        let flags = KvmMemFlag::empty();
        let region = KvmUserspaceMemoryRegion {
            slot,
            guest_phys_addr: gpa,
            memory_size: 0,
            userspace_addr: 0,
            flags,
        };
        unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
            .context(error::GuestUnmap { gpa, size })?;
        log::trace!(
            "{}: slot-{slot}: unmapped: {gpa:#018x}, size={size:#x}",
            self.vm
        );
        Ok(())
    }
}
180

181
impl VmMemory for KvmMemory {
    /// Maps host memory `[hva, hva + size)` into the guest at `gpa` by
    /// installing a new KVM memory slot. KVM slots are always readable and
    /// executable, so `option.read` and `option.exec` must both be set.
    fn mem_map(&self, gpa: u64, size: u64, hva: usize, option: MemMapOption) -> Result<(), Error> {
        let mut flags = KvmMemFlag::empty();
        // Reject unsupported permission combinations up front.
        if !option.read || !option.exec {
            return kvm_error::MmapOption { option }.fail()?;
        }
        if !option.write {
            flags |= KvmMemFlag::READONLY;
        }
        if option.log_dirty {
            flags |= KvmMemFlag::LOG_DIRTY_PAGES;
        }
        let (slot_id, slots) = &mut *self.slots.lock();
        if let Some(memfd) = &self.vm.memfd {
            // Guest-memfd backed VM: use KVM_SET_USER_MEMORY_REGION2 so the
            // slot can reference the guest memfd.
            flags |= KvmMemFlag::GUEST_MEMFD;
            let region = KvmUserspaceMemoryRegion2 {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
                guest_memfd: memfd.as_raw_fd() as _,
                // NOTE(review): assumes the guest memfd is laid out so its
                // offset equals the guest physical address — confirm callers
                // allocate the memfd that way.
                guest_memfd_offset: gpa,
                ..Default::default()
            };
            unsafe { kvm_set_user_memory_region2(&self.vm.fd, &region) }
        } else {
            let region = KvmUserspaceMemoryRegion {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
            };
            unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
        }
        .context(error::GuestMap { hva, gpa, size })?;
        // Record the mapping so `unmap(gpa, size)` can find the slot later.
        slots.insert((gpa, size), *slot_id);
        log::trace!(
            "{}: slot-{slot_id}: mapped: {gpa:#018x} -> {hva:#018x}, size = {size:#x}",
            self.vm
        );
        *slot_id += 1;
        Ok(())
    }

    /// Removes the slot previously mapped at exactly `(gpa, size)`; errors
    /// with NotFound if no such mapping was recorded.
    fn unmap(&self, gpa: u64, size: u64) -> Result<(), Error> {
        let (_, slots) = &mut *self.slots.lock();
        let Some(slot) = slots.remove(&(gpa, size)) else {
            return Err(ErrorKind::NotFound.into()).context(error::GuestUnmap { gpa, size });
        };
        self.unmap(slot, gpa, size)
    }

    /// Registers `range` as an encrypted guest memory region
    /// (KVM_MEMORY_ENCRYPT_REG_REGION).
    fn register_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_reg_region(&self.vm.fd, &region) }
            .context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Reverses `register_encrypted_range` for `range`.
    fn deregister_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_unreg_region(&self.vm.fd, &region) }
            .context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Sets (private = true) or clears the KVM private-memory attribute on
    /// the guest range `[gpa, gpa + size)`.
    fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let attr = KvmMemoryAttributes {
            address: gpa,
            size,
            attributes: if private {
                KvmMemoryAttribute::PRIVATE
            } else {
                KvmMemoryAttribute::empty()
            },
            flags: 0,
        };
        unsafe { kvm_set_memory_attributes(&self.vm.fd, &attr) }.context(error::EncryptedRegion)?;
        Ok(())
    }

    /// Unmaps every recorded slot and resets the slot-id counter to 0.
    fn reset(&self) -> Result<()> {
        let (slot_id, slots) = &mut *self.slots.lock();
        for ((gpa, size), slot) in slots.drain() {
            self.unmap(slot, gpa, size)?;
        }
        *slot_id = 0;
        Ok(())
    }
}
279

280
/// Interrupt sender bound to a single irqchip pin, backed by an eventfd
/// assigned to the pin's GSI via KVM_IRQFD.
#[derive(Debug)]
pub struct KvmIrqSender {
    /// The irqchip pin (also used as the GSI) this sender triggers.
    pin: u8,
    vm: Arc<VmInner>,
    /// Writing to this eventfd injects the interrupt into the guest.
    event_fd: OwnedFd,
}
286

287
impl Drop for KvmIrqSender {
    fn drop(&mut self) {
        // Release the pin in the shared pin map so it can be claimed again.
        let pin_flag = 1 << (self.pin as u32);
        self.vm.pin_map.fetch_and(!pin_flag, Ordering::AcqRel);
        // De-assign the irqfd from the kernel; drop cannot propagate errors,
        // so failures are only logged.
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.pin as u32,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        if let Err(e) = unsafe { kvm_irqfd(&self.vm.fd, &request) } {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
306

307
impl IrqSender for KvmIrqSender {
    /// Triggers the interrupt by writing the 8-byte value 1 to the irqfd.
    fn send(&self) -> Result<(), Error> {
        ffi!(unsafe { write(self.event_fd.as_raw_fd(), &1u64 as *const _ as _, 8) })
            .context(error::SendInterrupt)?;
        Ok(())
    }
}
314

315
/// Cached MSI route state for one GSI, mirrored into the kernel routing
/// table by `VmInner::update_routing_table`.
#[derive(Debug, Default)]
pub(crate) struct KvmMsiEntryData {
    // MSI address (low/high 32 bits) and payload data for the route.
    addr_lo: u32,
    addr_hi: u32,
    data: u32,
    // While masked, the entry is omitted from the kernel routing table.
    masked: bool,
    // Set when a field changed while masked; triggers a routing-table
    // update when the entry is unmasked.
    dirty: bool,
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}
325

326
/// MSI interrupt backed by an eventfd, bound to a GSI allocated from the
/// VM's MSI table by `KvmMsiSender::create_irqfd`.
#[derive(Debug)]
pub struct KvmIrqFd {
    /// The eventfd assigned to `gsi` with KVM_IRQFD while unmasked.
    event_fd: OwnedFd,
    vm: Arc<VmInner>,
    /// The GSI this irqfd owns; its entry is removed from the table on drop.
    gsi: u32,
}
332

333
impl Drop for KvmIrqFd {
    fn drop(&mut self) {
        // Remove this GSI's entry so the GSI can be reallocated.
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.remove(&self.gsi) else {
            log::error!(
                "{}: cannot find gsi {:#x} in the gsi table",
                self.vm,
                self.gsi,
            );
            return;
        };
        // A masked entry has already been de-assigned (see set_masked), so
        // there is nothing left to undo in the kernel.
        if entry.masked {
            return;
        }
        // Drop cannot propagate errors; log and continue.
        if let Err(e) = self.deassign_irqfd() {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
356

357
impl AsFd for KvmIrqFd {
UNCOV
358
    fn as_fd(&self) -> BorrowedFd<'_> {
UNCOV
359
        self.event_fd.as_fd()
360
    }
361
}
362

363
impl KvmIrqFd {
    /// Connects the eventfd to this irqfd's GSI via KVM_IRQFD, so writes to
    /// the eventfd inject the routed MSI.
    fn assign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }

    /// Reverses `assign_irqfd` by replaying the request with the DEASSIGN
    /// flag.
    fn deassign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: de-assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }
}
397

398
/// Generates a getter/setter pair for one `u32` field of the
/// `KvmMsiEntryData` entry keyed by `self.gsi` in `self.vm.msi_table`.
///
/// The setter is a no-op when the value is unchanged; otherwise it updates
/// the kernel routing table immediately if the entry is unmasked, or marks
/// the entry dirty so the update happens on unmask.
macro_rules! impl_irqfd_method {
    ($field:ident, $get:ident, $set:ident) => {
        fn $get(&self) -> u32 {
            let table = self.vm.msi_table.read();
            // The entry lives for the lifetime of the KvmIrqFd that owns
            // this gsi, so a miss here is a broken invariant.
            let Some(entry) = table.get(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            entry.$field
        }
        fn $set(&self, val: u32) -> Result<()> {
            let mut table = self.vm.msi_table.write();
            let Some(entry) = table.get_mut(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            if entry.$field == val {
                return Ok(());
            }
            entry.$field = val;

            if !entry.masked {
                self.vm.update_routing_table(&table)?;
            } else {
                // Defer the kernel update until the entry is unmasked.
                entry.dirty = true;
            }
            Ok(())
        }
    };
}
426

427
impl IrqFd for KvmIrqFd {
    impl_irqfd_method!(addr_lo, get_addr_lo, set_addr_lo);

    impl_irqfd_method!(addr_hi, get_addr_hi, set_addr_hi);

    impl_irqfd_method!(data, get_data, set_data);

    /// Returns whether this GSI's route is currently masked.
    fn get_masked(&self) -> bool {
        let table = self.vm.msi_table.read();
        let Some(entry) = table.get(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        entry.masked
    }

    /// Masks or unmasks this route. Returns `Ok(false)` when the state was
    /// already `val`. Unmasking first re-installs the routing table if the
    /// entry changed while masked, then re-assigns the irqfd; masking only
    /// de-assigns the irqfd (the routing entry is dropped lazily on the next
    /// table update).
    ///
    /// NOTE(review): `entry.dirty` is not cleared after a successful routing
    /// update here, so a later unmask repeats the update — looks harmless
    /// but confirm it is intended.
    fn set_masked(&self, val: bool) -> Result<bool> {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.get_mut(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        if entry.masked == val {
            return Ok(false);
        }
        entry.masked = val;
        if !val {
            if entry.dirty {
                self.vm.update_routing_table(&table)?;
            }
            self.assign_irqfd()?;
        } else {
            self.deassign_irqfd()?;
        }
        Ok(true)
    }
}
462

463
/// Capacity of the fixed-size routing table passed to KVM_SET_GSI_ROUTING.
const MAX_GSI_ROUTES: usize = 256;
464

465
/// MSI sender: signals MSIs directly via KVM_SIGNAL_MSI and allocates
/// eventfd-backed MSI routes (`KvmIrqFd`).
#[derive(Debug)]
pub struct KvmMsiSender {
    vm: Arc<VmInner>,
    /// Device id attached to every MSI and MSI route from this sender.
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}
471

472
impl MsiSender for KvmMsiSender {
    type IrqFd = KvmIrqFd;

    /// Delivers one MSI immediately via KVM_SIGNAL_MSI.
    fn send(&self, addr: u64, data: u32) -> Result<()> {
        let kvm_msi = KvmMsi {
            address_lo: addr as u32,
            address_hi: (addr >> 32) as u32,
            data,
            #[cfg(target_arch = "aarch64")]
            devid: self.devid,
            #[cfg(target_arch = "aarch64")]
            flags: KvmMsiFlag::VALID_DEVID,
            ..Default::default()
        };
        unsafe { kvm_signal_msi(&self.vm.fd, &kvm_msi) }.context(error::SendInterrupt)?;
        Ok(())
    }

    /// Creates a new eventfd and allocates it a free GSI from the MSI range
    /// `[24, MAX_GSI_ROUTES)`. The new entry starts masked; the caller
    /// programs the route and unmasks it through the `IrqFd` trait.
    fn create_irqfd(&self) -> Result<Self::IrqFd> {
        let event_fd = unsafe {
            OwnedFd::from_raw_fd(
                ffi!(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)).context(error::IrqFd)?,
            )
        };
        let mut table = self.vm.msi_table.write();
        let mut allocated_gsi = None;
        // Scan at most one full cycle of the MSI GSI space for a free slot;
        // next_msi_gsi rotates so allocation tends to avoid recently-used GSIs.
        for _ in 0..(MAX_GSI_ROUTES - 24) {
            let gsi = self.vm.next_msi_gsi.fetch_add(1, Ordering::AcqRel)
                % (MAX_GSI_ROUTES as u32 - 24)
                + 24;
            let new_entry = KvmMsiEntryData {
                #[cfg(target_arch = "aarch64")]
                devid: self.devid,
                masked: true,
                ..Default::default()
            };
            if let Some(e) = table.insert(gsi, new_entry) {
                // GSI already in use: put the displaced entry back and keep
                // looking.
                table.insert(gsi, e);
            } else {
                allocated_gsi = Some(gsi);
                break;
            }
        }
        let Some(gsi) = allocated_gsi else {
            return kvm_error::AllocateGsi.fail()?;
        };
        log::debug!(
            "{}: allocated: gsi {gsi:#x} -> irqfd {:#x}",
            self.vm,
            event_fd.as_raw_fd()
        );
        let entry = KvmIrqFd {
            vm: self.vm.clone(),
            event_fd,
            gsi,
        };
        Ok(entry)
    }
}
531

532
/// Eventfd signaled by KVM when the guest writes a registered MMIO address.
#[derive(Debug)]
pub struct KvmIoeventFd {
    fd: OwnedFd,
}
536

537
impl AsFd for KvmIoeventFd {
    /// Borrows the underlying eventfd.
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}

// Marker impl: KvmIoeventFd satisfies the hv-layer IoeventFd contract.
impl IoeventFd for KvmIoeventFd {}
544

545
/// Creates ioeventfds and (de)registers them on the VM via KVM_IOEVENTFD.
#[derive(Debug)]
pub struct KvmIoeventFdRegistry {
    vm: Arc<VmInner>,
}
549

550
impl IoeventFdRegistry for KvmIoeventFdRegistry {
    type IoeventFd = KvmIoeventFd;

    /// Creates a fresh non-blocking, close-on-exec eventfd.
    fn create(&self) -> Result<Self::IoeventFd> {
        let fd =
            ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) }).context(error::IoeventFd)?;
        Ok(KvmIoeventFd {
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
        })
    }

    /// Registers `fd` to fire on guest writes to MMIO address `gpa` of width
    /// `len` bytes. With `data`, the eventfd fires only when the written
    /// value matches (KVM datamatch).
    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()> {
        let mut request = KvmIoEventFd {
            addr: gpa,
            len: len as u32,
            fd: fd.as_fd().as_raw_fd(),
            ..Default::default()
        };
        if let Some(data) = data {
            request.datamatch = data;
            request.flags |= KvmIoEventFdFlag::DATA_MATCH;
        }
        unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        // Remember the request so deregister() can replay it with DEASSIGN.
        let mut fds = self.vm.ioeventfds.lock();
        fds.insert(request.fd, request);
        Ok(())
    }

    /// Port-I/O ioeventfds are not implemented yet.
    #[cfg(target_arch = "x86_64")]
    fn register_port(
        &self,
        _fd: &Self::IoeventFd,
        _port: u16,
        _len: u8,
        _data: Option<u64>,
    ) -> Result<()> {
        unimplemented!()
    }

    /// Removes the registration for `fd`, if any, by replaying the saved
    /// request with the DEASSIGN flag. Unknown fds are silently ignored.
    fn deregister(&self, fd: &Self::IoeventFd) -> Result<()> {
        let mut fds = self.vm.ioeventfds.lock();
        if let Some(mut request) = fds.remove(&fd.as_fd().as_raw_fd()) {
            request.flags |= KvmIoEventFdFlag::DEASSIGN;
            unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        }
        Ok(())
    }
}
598

599
impl Vm for KvmVm {
    #[cfg(target_arch = "aarch64")]
    type GicV2 = aarch64::KvmGicV2;
    #[cfg(target_arch = "aarch64")]
    type GicV2m = aarch64::KvmGicV2m;
    #[cfg(target_arch = "aarch64")]
    type GicV3 = aarch64::KvmGicV3;
    type IoeventFdRegistry = KvmIoeventFdRegistry;
    type IrqSender = KvmIrqSender;
    #[cfg(target_arch = "aarch64")]
    type Its = aarch64::KvmIts;
    type Memory = KvmMemory;
    type MsiSender = KvmMsiSender;
    type Vcpu = KvmVcpu;

    /// Creates vCPU `id` with KVM_CREATE_VCPU and mmaps its shared
    /// `kvm_run` block of `vcpu_mmap_size` bytes.
    fn create_vcpu(&self, id: u32) -> Result<Self::Vcpu, Error> {
        let vcpu_fd = unsafe { kvm_create_vcpu(&self.vm.fd, id) }.context(error::CreateVcpu)?;
        let kvm_run = unsafe { KvmRunBlock::new(vcpu_fd, self.vcpu_mmap_size) }?;
        Ok(KvmVcpu {
            fd: unsafe { OwnedFd::from_raw_fd(vcpu_fd) },
            kvm_run,
            vm: self.vm.clone(),
            io_index: 0,
        })
    }

    /// Kicks the vCPU thread by sending it SIGRTMIN via pthread_kill, which
    /// interrupts a blocking KVM_RUN.
    fn stop_vcpu<T>(&self, _id: u32, handle: &JoinHandle<T>) -> Result<(), Error> {
        ffi!(unsafe { libc::pthread_kill(handle.as_pthread_t() as _, SIGRTMIN()) })
            .context(error::StopVcpu)?;
        Ok(())
    }

    /// Returns the VM's memory object; errors if it was already created.
    fn create_vm_memory(&mut self) -> Result<Self::Memory, Error> {
        if self.memory_created {
            error::MemoryCreated.fail()
        } else {
            let kvm_memory = KvmMemory::new(self.vm.clone());
            self.memory_created = true;
            Ok(kvm_memory)
        }
    }

    /// Claims irqchip pin `pin` (error if already claimed), checks
    /// KVM_CAP_IRQFD, re-installs the routing table so the pin is routed,
    /// and assigns a fresh eventfd to the pin's GSI.
    fn create_irq_sender(&self, pin: u8) -> Result<Self::IrqSender, Error> {
        let pin_flag = 1 << pin;
        // fetch_or atomically claims the pin; a set bit means someone else
        // already owns it.
        if self.vm.pin_map.fetch_or(pin_flag, Ordering::AcqRel) & pin_flag == pin_flag {
            return Err(std::io::ErrorKind::AlreadyExists.into()).context(error::CreateIrq { pin });
        }
        if self.vm.check_extension(KvmCap::IRQFD)? == 0 {
            return error::Capability {
                cap: "KVM_CAP_IRQFD",
            }
            .fail();
        }
        let event_fd = ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) })
            .context(error::CreateIrq { pin })?;
        let request = KvmIrqfd {
            fd: event_fd as u32,
            gsi: pin as u32,
            ..Default::default()
        };
        // Install the routing for the newly claimed pin before assigning
        // the irqfd to it.
        self.vm.update_routing_table(&self.vm.msi_table.read())?;
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::CreateIrq { pin })?;
        Ok(KvmIrqSender {
            pin,
            vm: self.vm.clone(),
            event_fd: unsafe { OwnedFd::from_raw_fd(event_fd) },
        })
    }

    /// Returns an MSI sender; requires KVM_CAP_SIGNAL_MSI.
    fn create_msi_sender(
        &self,
        #[cfg(target_arch = "aarch64")] devid: u32,
    ) -> Result<Self::MsiSender> {
        if self.vm.check_extension(KvmCap::SIGNAL_MSI)? == 0 {
            return error::Capability {
                cap: "KVM_CAP_SIGNAL_MSI",
            }
            .fail();
        }
        Ok(KvmMsiSender {
            vm: self.vm.clone(),
            #[cfg(target_arch = "aarch64")]
            devid,
        })
    }

    /// Returns a registry for wiring ioeventfds to guest MMIO addresses.
    fn create_ioeventfd_registry(&self) -> Result<Self::IoeventFdRegistry> {
        Ok(KvmIoeventFdRegistry {
            vm: self.vm.clone(),
        })
    }

    // SEV/SNP launch flow: thin wrappers delegating to the x86_64-specific
    // kvm_sev_* / kvm_snp_* helpers.
    #[cfg(target_arch = "x86_64")]
    fn sev_launch_start(&self, policy: u32) -> Result<(), Error> {
        self.kvm_sev_launch_start(policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_data(&self, range: &mut [u8]) -> Result<(), Error> {
        self.kvm_sev_launch_update_data(range)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_vmsa(&self) -> Result<(), Error> {
        self.kvm_sev_launch_update_vmsa()
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_measure(&self) -> Result<Vec<u8>, Error> {
        self.kvm_sev_launch_measure()
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_finish(&self) -> Result<(), Error> {
        self.kvm_sev_launch_finish()
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_start(&self, policy: SnpPolicy) -> Result<()> {
        self.kvm_snp_launch_start(policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_update(&self, range: &mut [u8], gpa: u64, type_: SnpPageType) -> Result<()> {
        self.kvm_snp_launch_update(range, gpa, type_)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_finish(&self) -> Result<()> {
        self.kvm_snp_launch_finish()
    }

    // GIC creation delegates to the aarch64-specific helpers.
    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2(&self, distributor_base: u64, cpu_interface_base: u64) -> Result<Self::GicV2> {
        self.kvm_create_gic_v2(distributor_base, cpu_interface_base)
    }

    /// GICv2m is not supported by this backend; always returns Unsupported.
    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2m(&self, _base: u64) -> Result<Self::GicV2m> {
        Err(std::io::ErrorKind::Unsupported.into()).context(error::CreateDevice)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v3(
        &self,
        distributor_base: u64,
        redistributor_base: u64,
        redistributor_count: u32,
    ) -> Result<Self::GicV3> {
        self.kvm_create_gic_v3(distributor_base, redistributor_base, redistributor_count)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_its(&self, base: u64) -> Result<Self::Its> {
        self.kvm_create_its(base)
    }
}
756

757
#[cfg(test)]
mod test {
    use std::ptr::null_mut;

    use assert_matches::assert_matches;
    use libc::{MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_EXEC, PROT_READ, PROT_WRITE, mmap};

    use super::*;
    use crate::ffi;
    use crate::hv::kvm::KvmConfig;
    use crate::hv::{Hypervisor, Kvm, MemMapOption, VmConfig};

    /// Maps an anonymous host page into a real KVM VM, checking that
    /// non-readable and non-executable mappings are rejected while a
    /// read+exec (read-only) mapping succeeds.
    #[test]
    #[cfg_attr(not(feature = "test-hv"), ignore)]
    fn test_mem_map() {
        let kvm = Kvm::new(KvmConfig::default()).unwrap();
        let vm_config = VmConfig { coco: None };
        let mut vm = kvm.create_vm(&vm_config).unwrap();
        let vm_memory = vm.create_vm_memory().unwrap();

        let prot = PROT_WRITE | PROT_READ | PROT_EXEC;
        let flag = MAP_ANONYMOUS | MAP_PRIVATE;
        let user_mem = ffi!(
            unsafe { mmap(null_mut(), 0x1000, prot, flag, -1, 0) },
            MAP_FAILED
        )
        .unwrap();
        // KVM slots must be readable: read = false must be rejected.
        // (Was misnamed `option_no_write` in the original fixture.)
        let option_no_read = MemMapOption {
            read: false,
            write: true,
            exec: true,
            log_dirty: true,
        };
        assert_matches!(
            vm_memory.mem_map(0x0, 0x1000, user_mem as usize, option_no_read),
            Err(Error::KvmErr { .. })
        );
        // KVM slots must be executable: exec = false must be rejected.
        // The original fixture set exec: true (and read: false), so this
        // branch of mem_map was never actually exercised.
        let option_no_exec = MemMapOption {
            read: true,
            write: true,
            exec: false,
            log_dirty: true,
        };
        assert_matches!(
            vm_memory.mem_map(0x0, 0x1000, user_mem as usize, option_no_exec),
            Err(Error::KvmErr { .. })
        );
        // Read-only mappings (write = false) are allowed via KVM_MEM_READONLY.
        let option = MemMapOption {
            read: true,
            write: false,
            exec: true,
            log_dirty: true,
        };
        vm_memory
            .mem_map(0x0, 0x1000, user_mem as usize, option)
            .unwrap();
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc