google / alioth / 17385983897

01 Sep 2025 07:41PM UTC coverage: 18.009% (-0.1%) from 18.149%
Pull Request #281: Port to Apple Hypervisor framework
Merge 782c57b11 into 6ec9a6d6b

0 of 154 new or added lines in 11 files covered. (0.0%)

1166 existing lines in 29 files now uncovered.

1362 of 7563 relevant lines covered (18.01%)

18.77 hits per line

Source File

/alioth/src/board/board.rs (file coverage: 0.0%)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "x86_64")]
mod x86_64;

#[cfg(target_os = "linux")]
use std::collections::HashMap;
use std::ffi::CStr;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use std::thread::JoinHandle;

use libc::{MAP_PRIVATE, MAP_SHARED};
use parking_lot::{Condvar, Mutex, RwLock, RwLockReadGuard};
use snafu::{ResultExt, Snafu};

#[cfg(target_arch = "x86_64")]
use crate::arch::layout::PORT_PCI_ADDRESS;
use crate::arch::layout::{
    MEM_64_START, PCIE_CONFIG_START, PCIE_MMIO_32_NON_PREFETCHABLE_END,
    PCIE_MMIO_32_NON_PREFETCHABLE_START, PCIE_MMIO_32_PREFETCHABLE_END,
    PCIE_MMIO_32_PREFETCHABLE_START, RAM_32_SIZE,
};
#[cfg(target_arch = "x86_64")]
use crate::device::fw_cfg::FwCfg;
use crate::errors::{DebugTrace, trace_error};
use crate::hv::{Coco, Vcpu, Vm, VmEntry, VmExit};
#[cfg(target_arch = "x86_64")]
use crate::loader::xen;
use crate::loader::{ExecType, InitState, Payload, firmware, linux};
use crate::mem::emulated::Mmio;
use crate::mem::mapped::ArcMemPages;
use crate::mem::{MemBackend, MemConfig, MemRegion, MemRegionType, Memory};
use crate::pci::Bdf;
use crate::pci::bus::PciBus;
#[cfg(target_os = "linux")]
use crate::vfio::container::Container;
#[cfg(target_os = "linux")]
use crate::vfio::iommu::Ioas;

#[cfg(target_arch = "aarch64")]
pub(crate) use self::aarch64::ArchBoard;
#[cfg(target_arch = "x86_64")]
pub(crate) use self::x86_64::ArchBoard;

#[trace_error]
#[derive(Snafu, DebugTrace)]
#[snafu(module, context(suffix(false)))]
pub enum Error {
    #[snafu(display("Hypervisor internal error"), context(false))]
    HvError { source: Box<crate::hv::Error> },
    #[snafu(display("Failed to access guest memory"), context(false))]
    Memory { source: Box<crate::mem::Error> },
    #[snafu(display("Failed to load payload"), context(false))]
    Loader { source: Box<crate::loader::Error> },
    #[snafu(display("Failed to create VCPU-{id}"))]
    CreateVcpu {
        id: u32,
        source: Box<crate::hv::Error>,
    },
    #[snafu(display("Failed to run VCPU-{id}"))]
    RunVcpu {
        id: u32,
        source: Box<crate::hv::Error>,
    },
    #[snafu(display("Failed to stop VCPU-{id}"))]
    StopVcpu {
        id: u32,
        source: Box<crate::hv::Error>,
    },
    #[snafu(display("Failed to reset PCI {bdf}"))]
    ResetPci {
        bdf: Bdf,
        source: Box<crate::pci::Error>,
    },
    #[snafu(display("Failed to configure firmware"))]
    Firmware { error: std::io::Error },
    #[snafu(display("Failed to notify the VMM thread"))]
    NotifyVmm,
    #[snafu(display("Another VCPU thread has signaled failure"))]
    PeerFailure,
}

type Result<T, E = Error> = std::result::Result<T, E>;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BoardState {
    Created,
    Running,
    Shutdown,
    RebootPending,
}

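// Synchronization state shared by every VCPU thread: `state` tracks the board
// lifecycle, `fatal` records that a peer VCPU has failed, and `count` counts
// the threads currently parked at the rendezvous barrier (see
// `Board::sync_vcpus`).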
#[derive(Debug)]
struct MpSync {
    state: BoardState,
    fatal: bool,
    count: u32,
}

pub const PCIE_MMIO_64_SIZE: u64 = 1 << 40;

pub struct BoardConfig {
    pub mem: MemConfig,
    pub num_cpu: u32,
    pub coco: Option<Coco>,
}

impl BoardConfig {
    pub fn pcie_mmio_64_start(&self) -> u64 {
        (self.mem.size.saturating_sub(RAM_32_SIZE) + MEM_64_START).next_power_of_two()
    }
}
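// Worked example for `pcie_mmio_64_start` (illustrative values only; the real
// constants live in crate::arch::layout): assuming RAM_32_SIZE = 3 GiB and
// MEM_64_START = 4 GiB, a guest with 8 GiB of RAM yields
// (8 GiB - 3 GiB) + 4 GiB = 9 GiB, which rounds up to the next power of two,
// so the 64-bit PCIe MMIO window would start at 16 GiB and span
// PCIE_MMIO_64_SIZE = 1 TiB.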

type VcpuGuard<'a> = RwLockReadGuard<'a, Vec<(JoinHandle<Result<()>>, Sender<()>)>>;
type VcpuHandle = (JoinHandle<Result<()>>, Sender<()>);

pub struct Board<V>
where
    V: Vm,
{
    pub vm: V,
    pub memory: Memory,
    pub vcpus: Arc<RwLock<Vec<VcpuHandle>>>,
    pub arch: ArchBoard<V>,
    pub config: BoardConfig,
    pub payload: RwLock<Option<Payload>>,
    pub io_devs: RwLock<Vec<(u16, Arc<dyn Mmio>)>>,
    #[cfg(target_arch = "aarch64")]
    pub mmio_devs: RwLock<Vec<(u64, Arc<MemRegion>)>>,
    pub pci_bus: PciBus,
    #[cfg(target_arch = "x86_64")]
    pub fw_cfg: Mutex<Option<Arc<Mutex<FwCfg>>>>,
    #[cfg(target_os = "linux")]
    pub vfio_ioases: Mutex<HashMap<Box<str>, Arc<Ioas>>>,
    #[cfg(target_os = "linux")]
    pub vfio_containers: Mutex<HashMap<Box<str>, Arc<Container>>>,

    mp_sync: Mutex<MpSync>,
    cond_var: Condvar,
}

impl<V> Board<V>
where
    V: Vm,
{
    pub fn new(vm: V, memory: Memory, arch: ArchBoard<V>, config: BoardConfig) -> Self {
        Board {
            vm,
            memory,
            arch,
            config,
            payload: RwLock::new(None),
            vcpus: Arc::new(RwLock::new(Vec::new())),
            io_devs: RwLock::new(Vec::new()),
            #[cfg(target_arch = "aarch64")]
            mmio_devs: RwLock::new(Vec::new()),
            pci_bus: PciBus::new(),
            #[cfg(target_arch = "x86_64")]
            fw_cfg: Mutex::new(None),
            #[cfg(target_os = "linux")]
            vfio_ioases: Mutex::new(HashMap::new()),
            #[cfg(target_os = "linux")]
            vfio_containers: Mutex::new(HashMap::new()),

            mp_sync: Mutex::new(MpSync {
                state: BoardState::Created,
                count: 0,
                fatal: false,
            }),
            cond_var: Condvar::new(),
        }
    }

    pub fn boot(&self) -> Result<()> {
        let vcpus = self.vcpus.read();
        let mut mp_sync = self.mp_sync.lock();
        mp_sync.state = BoardState::Running;
        for (_, boot_tx) in vcpus.iter() {
            boot_tx.send(()).unwrap();
        }
        Ok(())
    }

    fn load_payload(&self) -> Result<InitState, Error> {
        let payload = self.payload.read();
        let Some(payload) = payload.as_ref() else {
            return Ok(InitState::default());
        };
        let mem_regions = self.memory.mem_region_entries();
        let init_state = match payload.exec_type {
            ExecType::Linux => linux::load(
                &self.memory.ram_bus(),
                &mem_regions,
                &payload.executable,
                payload.cmd_line.as_deref(),
                payload.initramfs.as_ref(),
            )?,
            #[cfg(target_arch = "x86_64")]
            ExecType::Pvh => xen::load(
                &self.memory.ram_bus(),
                &mem_regions,
                &payload.executable,
                payload.cmd_line.as_deref(),
                payload.initramfs.as_ref(),
            )?,
            ExecType::Firmware => {
                let (init_state, mut rom) = firmware::load(&self.memory, &payload.executable)?;
                self.setup_firmware(&mut rom)?;
                init_state
            }
        };
        Ok(init_state)
    }

    fn add_pci_devs(&self) -> Result<()> {
        #[cfg(target_arch = "x86_64")]
        self.memory
            .add_io_dev(PORT_PCI_ADDRESS, self.pci_bus.io_bus.clone())?;
        self.memory.add_region(
            PCIE_CONFIG_START,
            Arc::new(MemRegion::with_emulated(
                self.pci_bus.segment.clone(),
                MemRegionType::Reserved,
            )),
        )?;
        let pcie_mmio_64_start = self.config.pcie_mmio_64_start();
        self.pci_bus.assign_resources(&[
            (0x1000, 0x10000),
            (
                PCIE_MMIO_32_NON_PREFETCHABLE_START,
                PCIE_MMIO_32_NON_PREFETCHABLE_END,
            ),
            (
                PCIE_MMIO_32_PREFETCHABLE_START,
                PCIE_MMIO_32_PREFETCHABLE_END,
            ),
            (pcie_mmio_64_start, pcie_mmio_64_start + PCIE_MMIO_64_SIZE),
        ]);
        Ok(())
    }

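    // Runs the VCPU until the guest requests shutdown (breaks with Ok(false))
    // or reboot (breaks with Ok(true)), dispatching port-I/O and MMIO exits to
    // the memory bus and re-checking the board state whenever the run is
    // interrupted.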
    fn vcpu_loop(&self, vcpu: &mut <V as Vm>::Vcpu, id: u32) -> Result<bool, Error> {
        let mut vm_entry = VmEntry::None;
        loop {
            let vm_exit = vcpu.run(vm_entry).context(error::RunVcpu { id })?;
            vm_entry = match vm_exit {
                VmExit::Io { port, write, size } => self.memory.handle_io(port, write, size)?,
                VmExit::Mmio { addr, write, size } => self.memory.handle_mmio(addr, write, size)?,
                VmExit::Shutdown => {
                    log::info!("vcpu {id} requested shutdown");
                    break Ok(false);
                }
                VmExit::Reboot => {
                    break Ok(true);
                }
                VmExit::Interrupted => {
                    let mp_sync = self.mp_sync.lock();
                    match mp_sync.state {
                        BoardState::Shutdown => VmEntry::Shutdown,
                        BoardState::RebootPending => VmEntry::Reboot,
                        _ => VmEntry::None,
                    }
                }
                VmExit::ConvertMemory { gpa, size, private } => {
                    self.memory.mark_private_memory(gpa, size, private)?;
                    VmEntry::None
                }
            };
        }
    }

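    // A condvar-based barrier across all VCPU threads: every arrival bumps
    // `count`; the last one resets the counter and wakes the rest, while
    // earlier arrivals block on `cond_var`. The `fatal` flag is checked on
    // both sides of the wait so that a failed peer (see `run_vcpu`) unblocks
    // the barrier instead of deadlocking it.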
    fn sync_vcpus(&self, vcpus: &VcpuGuard) -> Result<()> {
        let mut mp_sync = self.mp_sync.lock();
        if mp_sync.fatal {
            return error::PeerFailure.fail();
        }

        mp_sync.count += 1;
        if mp_sync.count == vcpus.len() as u32 {
            mp_sync.count = 0;
            self.cond_var.notify_all();
        } else {
            self.cond_var.wait(&mut mp_sync)
        }

        if mp_sync.fatal {
            return error::PeerFailure.fail();
        }

        Ok(())
    }

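    // Drives one VCPU for the lifetime of the VM. After the boot signal,
    // VCPU-0 performs the one-time board setup (RAM, I/O and PCI devices,
    // payload loading) while its peers wait at the barrier; every thread then
    // enters `vcpu_loop`. On exit, the first thread out moves the board to
    // Shutdown or RebootPending and stops the others; devices, memory, and
    // VCPUs are reset, and the loop either returns or begins the next boot
    // cycle.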
    fn run_vcpu_inner(
        &self,
        id: u32,
        vcpu: &mut V::Vcpu,
        boot_rx: &Receiver<()>,
    ) -> Result<(), Error> {
        self.init_vcpu(id, vcpu)?;
        boot_rx.recv().unwrap();
        if self.mp_sync.lock().state != BoardState::Running {
            return Ok(());
        }
        loop {
            let vcpus = self.vcpus.read();
            self.coco_init(id)?;
            if id == 0 {
                self.create_ram()?;
                for (port, dev) in self.io_devs.read().iter() {
                    self.memory.add_io_dev(*port, dev.clone())?;
                }
                #[cfg(target_arch = "aarch64")]
                for (addr, dev) in self.mmio_devs.read().iter() {
                    self.memory.add_region(*addr, dev.clone())?;
                }
                self.add_pci_devs()?;
                let init_state = self.load_payload()?;
                self.init_boot_vcpu(vcpu, &init_state)?;
                self.create_firmware_data(&init_state)?;
            }
            self.init_ap(id, vcpu, &vcpus)?;
            self.coco_finalize(id, &vcpus)?;
            self.sync_vcpus(&vcpus)?;
            drop(vcpus);

            let maybe_reboot = self.vcpu_loop(vcpu, id);

            let vcpus = self.vcpus.read();
            let mut mp_sync = self.mp_sync.lock();
            if mp_sync.state == BoardState::Running {
                mp_sync.state = if matches!(maybe_reboot, Ok(true)) {
                    BoardState::RebootPending
                } else {
                    BoardState::Shutdown
                };
                for (vcpu_id, (handle, _)) in vcpus.iter().enumerate() {
                    if id != vcpu_id as u32 {
                        log::info!("VCPU-{id}: stopping VCPU-{vcpu_id}");
                        self.vm
                            .stop_vcpu(vcpu_id as u32, handle)
                            .context(error::StopVcpu { id })?;
                    }
                }
            }
            drop(mp_sync);
            self.sync_vcpus(&vcpus)?;

            if id == 0 {
                let devices = self.pci_bus.segment.devices.read();
                for (bdf, dev) in devices.iter() {
                    dev.dev.reset().context(error::ResetPci { bdf: *bdf })?;
                    dev.dev.config().reset();
                }
                self.memory.reset()?;
            }
            self.reset_vcpu(id, vcpu)?;

            if let Err(e) = maybe_reboot {
                break Err(e);
            }

            let mut mp_sync = self.mp_sync.lock();
            if mp_sync.state == BoardState::Shutdown {
                break Ok(());
            }
            mp_sync.state = BoardState::Running;
        }
    }

    fn create_vcpu(&self, id: u32, event_tx: &Sender<u32>) -> Result<V::Vcpu> {
        let vcpu = self.vm.create_vcpu(id).context(error::CreateVcpu { id })?;
        if event_tx.send(id).is_err() {
            error::NotifyVmm.fail()
        } else {
            Ok(vcpu)
        }
    }

    pub fn run_vcpu(
        &self,
        id: u32,
        event_tx: Sender<u32>,
        boot_rx: Receiver<()>,
    ) -> Result<(), Error> {
        let mut vcpu = self.create_vcpu(id, &event_tx)?;

        let ret = self.run_vcpu_inner(id, &mut vcpu, &boot_rx);
        event_tx.send(id).unwrap();

        if matches!(ret, Ok(_) | Err(Error::PeerFailure { .. })) {
            return Ok(());
        }

        log::warn!("VCPU-{id} reported error, unblocking other VCPUs...");
        let mut mp_sync = self.mp_sync.lock();
        mp_sync.fatal = true;
        if mp_sync.count > 0 {
            self.cond_var.notify_all();
        }
        ret
    }

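    // Allocates the backing pages for guest RAM. The mapping is MAP_SHARED or
    // MAP_PRIVATE according to `config.mem.shared`; on Linux the backend may
    // be a memfd instead of anonymous memory, and transparent hugepages are
    // requested via madvise when configured.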
    fn create_ram_pages(
        &self,
        size: u64,
        #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] name: &CStr,
    ) -> Result<ArcMemPages> {
        let mmap_flag = if self.config.mem.shared {
            Some(MAP_SHARED)
        } else {
            Some(MAP_PRIVATE)
        };
        let pages = match self.config.mem.backend {
            #[cfg(target_os = "linux")]
            MemBackend::Memfd => ArcMemPages::from_memfd(name, size as usize, None),
            MemBackend::Anonymous => ArcMemPages::from_anonymous(size as usize, None, mmap_flag),
        }?;
        #[cfg(target_os = "linux")]
        if self.config.mem.transparent_hugepage {
            pages.madvise_hugepage()?;
        }
        Ok(pages)
    }
}