google / alioth / build 17385983897

01 Sep 2025 07:41PM UTC — coverage: 18.009% (down from 18.149%, -0.1%)

Pull Request #281: Port to Apple Hypervisor framework
Merge 782c57b11 into 6ec9a6d6b

0 of 154 new or added lines in 11 files covered (0.0%).
1166 existing lines in 29 files are now uncovered.
1362 of 7563 relevant lines covered (18.01%).
18.77 hits per line.
Source File: /alioth/src/mem/mem.rs (0.0% of relevant lines covered)

// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod addressable;
pub mod emulated;
pub mod mapped;

use std::any::type_name;
use std::fmt::Debug;
use std::sync::Arc;

use parking_lot::{Mutex, RwLock};
use serde::Deserialize;
use serde_aco::Help;
use snafu::Snafu;

use crate::errors::{DebugTrace, trace_error};
use crate::hv::{MemMapOption, VmEntry, VmMemory};

use self::addressable::{Addressable, SlotBackend};
use self::emulated::{Action, MmioBus, MmioRange};
use self::mapped::{ArcMemPages, Ram, RamBus};

#[trace_error]
#[derive(Snafu, DebugTrace)]
#[snafu(module, visibility(pub), context(suffix(false)))]
pub enum Error {
    #[snafu(display("Hypervisor internal error"), context(false))]
    HvError { source: Box<crate::hv::Error> },
    #[snafu(display("Cannot add a zero-sized slot"))]
    ZeroSizedSlot,
    #[snafu(display("(addr={addr:#x}, size={size:#x}) exceeds the address limit"))]
    ExceedsLimit { addr: u64, size: u64 },
    #[snafu(display("{new_item:#x?} overlaps with {exist_item:#x?}"))]
    Overlap {
        new_item: [u64; 2],
        exist_item: [u64; 2],
    },
    #[snafu(display("{addr:#x} is not mapped"))]
    NotMapped { addr: u64 },
    #[snafu(display("Sum of backend range sizes {sum:#x} exceeds the region total size"))]
    BackendTooBig { sum: u64, size: u64 },
    #[snafu(display("address {addr:#x} is not {align}-byte aligned"))]
    NotAligned { addr: u64, align: usize },
    #[snafu(display(
        "Guest address {addr:#x} (size = {size:#x}) is not backed by continuous host memory"
    ))]
    NotContinuous { addr: u64, size: u64 },
    #[snafu(display("Error from OS"), context(false))]
    System { error: std::io::Error },
    #[snafu(display("Failed to write data to destination"))]
    Write { error: std::io::Error },
    #[snafu(display("Failed to read data from source"))]
    Read { error: std::io::Error },
    #[snafu(display("Failed to do MMIO"))]
    Mmio {
        source: Box<dyn DebugTrace + Send + Sync + 'static>,
    },
    #[snafu(display("Failed to change memory layout"))]
    ChangeLayout {
        source: Box<dyn DebugTrace + Send + Sync + 'static>,
    },
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

fn default_memory_size() -> u64 {
    1 << 30
}

#[derive(Debug, Deserialize, Default, Help)]
pub struct MemConfig {
    /// Total guest memory size in bytes. [default: 1G]
    #[serde(default = "default_memory_size")]
    pub size: u64,
    /// Host backend [default: anon]
    #[serde(default)]
    pub backend: MemBackend,
    /// mmap() guest memory with MAP_SHARED or MAP_PRIVATE.
    /// [default: false]
    #[serde(default)]
    pub shared: bool,
    /// Enable transparent hugepage. [default: false]
    #[cfg(target_os = "linux")]
    #[serde(default, alias = "thp")]
    pub transparent_hugepage: bool,
}

#[derive(Debug, Deserialize, Default, Help)]
pub enum MemBackend {
    /// Anonymous memory by MAP_ANONYMOUS.
    #[default]
    #[serde(alias = "anon")]
    Anonymous,
    /// Anonymous file by memfd_create(). Always uses MAP_SHARED.
    #[cfg(target_os = "linux")]
    #[serde(alias = "memfd")]
    Memfd,
}

impl MemConfig {
    pub fn has_shared_fd(&self) -> bool {
        match &self.backend {
            #[cfg(target_os = "linux")]
            MemBackend::Memfd => true,
            MemBackend::Anonymous => false,
        }
    }
}

#[derive(Debug)]
pub enum MemRange {
    Ram(ArcMemPages),
    DevMem(ArcMemPages),
    Emulated(MmioRange),
    Span(u64),
}

impl MemRange {
    pub fn size(&self) -> u64 {
        match self {
            MemRange::Ram(pages) | MemRange::DevMem(pages) => pages.size(),
            MemRange::Emulated(range) => range.size(),
            MemRange::Span(size) => *size,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemRegionType {
    Hidden,
    Ram,
    Reserved,
    Acpi,
    Pmem,
}

#[derive(Debug, Clone, Copy)]
pub struct MemRegionEntry {
    pub size: u64,
    pub type_: MemRegionType,
}

pub trait MemRegionCallback: Debug + Send + Sync + 'static {
    fn mapped(&self, addr: u64) -> Result<()>;
    fn unmapped(&self) -> Result<()> {
        log::debug!("{} unmapped", type_name::<Self>());
        Ok(())
    }
}

#[derive(Debug)]
pub struct MemRegion {
    pub ranges: Vec<MemRange>,
    pub entries: Vec<MemRegionEntry>,
    pub callbacks: Mutex<Vec<Box<dyn MemRegionCallback>>>,
}

impl MemRegion {
    pub fn size(&self) -> u64 {
        self.entries.iter().fold(0, |accu, e| accu + e.size)
    }

    pub fn with_ram(pages: ArcMemPages, type_: MemRegionType) -> MemRegion {
        let size = pages.size();
        MemRegion {
            ranges: vec![MemRange::Ram(pages)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn with_dev_mem(pages: ArcMemPages, type_: MemRegionType) -> MemRegion {
        let size = pages.size();
        MemRegion {
            ranges: vec![MemRange::DevMem(pages)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn with_emulated(range: MmioRange, type_: MemRegionType) -> MemRegion {
        let size = range.size();
        MemRegion {
            ranges: vec![MemRange::Emulated(range)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn validate(&self) -> Result<()> {
        let entries_size = self.size();
        let ranges_size = self.ranges.iter().fold(0, |accu, r| accu + r.size());
        if ranges_size > entries_size {
            return error::BackendTooBig {
                sum: ranges_size,
                size: entries_size,
            }
            .fail();
        }
        Ok(())
    }
}

impl SlotBackend for Arc<MemRegion> {
    fn size(&self) -> u64 {
        MemRegion::size(self.as_ref())
    }
}

#[derive(Debug)]
pub struct IoRegion {
    pub range: MmioRange,
    pub callbacks: Mutex<Vec<Box<dyn MemRegionCallback>>>,
}

impl IoRegion {
    pub fn new(range: MmioRange) -> IoRegion {
        IoRegion {
            range,
            callbacks: Mutex::new(vec![]),
        }
    }
}

impl SlotBackend for Arc<IoRegion> {
    fn size(&self) -> u64 {
        self.range.size()
    }
}

pub trait LayoutChanged: Debug + Send + Sync + 'static {
    fn ram_added(&self, gpa: u64, pages: &ArcMemPages) -> Result<()>;
    fn ram_removed(&self, gpa: u64, pages: &ArcMemPages) -> Result<()>;
}

pub trait LayoutUpdated: Debug + Send + Sync + 'static {
    fn ram_updated(&self, ram: &Ram) -> Result<()>;
}

#[derive(Debug, Default)]
struct LayoutCallbacks {
    changed: Vec<Box<dyn LayoutChanged>>,
    updated: Vec<Box<dyn LayoutUpdated>>,
}

// lock order: region -> callbacks -> bus
#[derive(Debug)]
pub struct Memory {
    regions: Mutex<Addressable<Arc<MemRegion>>>,
    callbacks: Mutex<LayoutCallbacks>,
    ram_bus: Arc<RamBus>,
    mmio_bus: RwLock<MmioBus>,
    vm_memory: Box<dyn VmMemory>,

    io_bus: RwLock<MmioBus>,
    io_regions: Mutex<Addressable<Arc<IoRegion>>>,
}

impl Memory {
    pub fn new(vm_memory: impl VmMemory) -> Self {
        Memory {
            regions: Mutex::new(Addressable::new()),
            callbacks: Mutex::new(LayoutCallbacks::default()),
            ram_bus: Arc::new(RamBus::new()),
            mmio_bus: RwLock::new(MmioBus::new()),
            vm_memory: Box::new(vm_memory),
            io_bus: RwLock::new(MmioBus::new()),
            io_regions: Mutex::new(Addressable::new()),
        }
    }

    pub fn register_change_callback(&self, callback: Box<dyn LayoutChanged>) -> Result<()> {
        let regions = self.regions.lock();
        for (addr, region) in regions.iter() {
            let mut offset = 0;
            for range in &region.ranges {
                let gpa = addr + offset;
                match range {
                    MemRange::Ram(r) => callback.ram_added(gpa, r)?,
                    MemRange::Span(_) | MemRange::Emulated(_) | MemRange::DevMem(_) => {}
                }
                offset += range.size();
            }
        }
        let mut callbacks = self.callbacks.lock();
        callbacks.changed.push(callback);
        Ok(())
    }

    pub fn register_update_callback(&self, callback: Box<dyn LayoutUpdated>) -> Result<()> {
        let _regions = self.regions.lock();
        let mut callbacks = self.callbacks.lock();
        let ram = self.ram_bus.lock_layout();
        callback.ram_updated(&ram)?;
        callbacks.updated.push(callback);
        Ok(())
    }

    pub fn reset(&self) -> Result<()> {
        self.clear()?;
        self.vm_memory.reset()?;
        Ok(())
    }

    pub fn ram_bus(&self) -> Arc<RamBus> {
        self.ram_bus.clone()
    }

    fn map_to_vm(&self, gpa: u64, user_mem: &ArcMemPages) -> Result<(), Error> {
        let mem_options = MemMapOption {
            read: true,
            write: true,
            exec: true,
            log_dirty: false,
        };
        self.vm_memory
            .mem_map(gpa, user_mem.size(), user_mem.addr(), mem_options)?;
        Ok(())
    }

    fn unmap_from_vm(&self, gpa: u64, user_mem: &ArcMemPages) -> Result<(), Error> {
        self.vm_memory.unmap(gpa, user_mem.size())?;
        Ok(())
    }

    pub fn add_region(&self, addr: u64, region: Arc<MemRegion>) -> Result<()> {
        region.validate()?;
        let mut regions = self.regions.lock();
        regions.add(addr, region.clone())?;
        let mut offset = 0;
        let callbacks = self.callbacks.lock();
        let mut ram_updated = false;
        for range in &region.ranges {
            let gpa = addr + offset;
            match range {
                MemRange::Emulated(r) => {
                    let mut mmio_bus = self.mmio_bus.write();
                    mmio_bus.add(gpa, r.clone())?
                }
                MemRange::Ram(r) => {
                    self.map_to_vm(gpa, r)?;
                    for callback in &callbacks.changed {
                        callback.ram_added(gpa, r)?;
                    }
                    self.ram_bus.add(gpa, r.clone())?;
                    ram_updated = true;
                }
                MemRange::DevMem(r) => self.map_to_vm(gpa, r)?,
                MemRange::Span(_) => {}
            }
            offset += range.size();
        }
        if ram_updated {
            let ram = self.ram_bus.lock_layout();
            for update_callback in &callbacks.updated {
                update_callback.ram_updated(&ram)?;
            }
        }
        let region_callbacks = region.callbacks.lock();
        for callback in region_callbacks.iter() {
            callback.mapped(addr)?;
        }
        Ok(())
    }

    fn unmap_region(&self, addr: u64, region: &MemRegion) -> Result<()> {
        let mut offset = 0;
        let callbacks = self.callbacks.lock();
        let mut ram_updated = false;
        for range in &region.ranges {
            let gpa = addr + offset;
            match range {
                MemRange::Emulated(_) => {
                    let mut mmio_bus = self.mmio_bus.write();
                    mmio_bus.remove(gpa)?;
                }
                MemRange::Ram(r) => {
                    self.ram_bus.remove(gpa)?;
                    for callback in &callbacks.changed {
                        callback.ram_removed(gpa, r)?;
                    }
                    self.unmap_from_vm(gpa, r)?;
                    ram_updated = true;
                }
                MemRange::DevMem(r) => self.unmap_from_vm(gpa, r)?,
                MemRange::Span(_) => {}
            };
            offset += range.size();
        }
        if ram_updated {
            let ram = self.ram_bus.lock_layout();
            for callback in &callbacks.updated {
                callback.ram_updated(&ram)?;
            }
        }
        let region_callbacks = region.callbacks.lock();
        for callback in region_callbacks.iter() {
            callback.unmapped()?;
        }
        Ok(())
    }

    pub fn remove_region(&self, addr: u64) -> Result<Arc<MemRegion>> {
        let mut regions = self.regions.lock();
        let region = regions.remove(addr)?;
        self.unmap_region(addr, &region)?;
        Ok(region)
    }

    // TODO can be optimized
    fn clear(&self) -> Result<()> {
        let mut regions = self.regions.lock();
        let regions = regions.drain(..);
        for (addr, region) in regions {
            self.unmap_region(addr, &region)?;
        }
        #[cfg(target_arch = "x86_64")]
        {
            let mut io_regions = self.io_regions.lock();
            let io_regions = io_regions.drain(..);
            for (port, io_region) in io_regions {
                self.unmap_io_region(port as u16, &io_region)?;
            }
        }
        Ok(())
    }

    pub fn mem_region_entries(&self) -> Vec<(u64, MemRegionEntry)> {
        let mut entries = vec![];
        let regions = self.regions.lock();
        for (start, region) in regions.iter() {
            let mut offset = 0;
            for entry in region.entries.iter() {
                entries.push((start + offset, *entry));
                offset += entry.size;
            }
        }
        entries
    }

    pub fn add_io_dev(&self, port: u16, dev: MmioRange) -> Result<()> {
        self.add_io_region(port, Arc::new(IoRegion::new(dev)))
    }

    pub fn add_io_region(&self, port: u16, region: Arc<IoRegion>) -> Result<()> {
        let mut regions = self.io_regions.lock();
        regions.add(port as u64, region.clone())?;
        let mut io_bus = self.io_bus.write();
        io_bus.add(port as u64, region.range.clone())?;
        let callbacks = region.callbacks.lock();
        for callback in callbacks.iter() {
            callback.mapped(port as u64)?;
        }
        Ok(())
    }

    fn unmap_io_region(&self, port: u16, region: &IoRegion) -> Result<()> {
        let mut io_bus = self.io_bus.write();
        io_bus.remove(port as u64)?;
        let callbacks = region.callbacks.lock();
        for callback in callbacks.iter() {
            callback.unmapped()?;
        }
        Ok(())
    }

    pub fn remove_io_region(&self, port: u16) -> Result<Arc<IoRegion>> {
        let mut io_regions = self.io_regions.lock();
        let io_region = io_regions.remove(port as u64)?;
        self.unmap_io_region(port, &io_region)?;
        Ok(io_region)
    }

    pub fn register_encrypted_pages(&self, pages: &ArcMemPages) -> Result<()> {
        self.vm_memory.register_encrypted_range(pages.as_slice())?;
        Ok(())
    }

    pub fn deregister_encrypted_pages(&self, pages: &ArcMemPages) -> Result<()> {
        self.vm_memory
            .deregister_encrypted_range(pages.as_slice())?;
        Ok(())
    }

    pub fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let vm_memory = &self.vm_memory;
        let regions = self.regions.lock();
        let end = gpa + size;
        let mut start = gpa;
        'out: while let Some((mut addr, region)) = regions.search_next(start) {
            let next_start = addr + region.size();
            for range in &region.ranges {
                let (MemRange::DevMem(r) | MemRange::Ram(r)) = range else {
                    addr += range.size();
                    continue;
                };
                let range_end = addr + r.size();
                if range_end <= start {
                    addr = range_end;
                    continue;
                }
                let gpa_start = std::cmp::max(addr, start);
                let gpa_end = std::cmp::min(end, range_end);
                if gpa_start >= gpa_end {
                    break 'out;
                }
                vm_memory.mark_private_memory(gpa_start, gpa_end - gpa_start, private)?;
                start = gpa_end;
            }
            if next_start >= end {
                break;
            }
            start = next_start;
        }
        Ok(())
    }
}

impl Drop for Memory {
    fn drop(&mut self) {
        if let Err(e) = self.clear() {
            log::info!("dropping memory: {e}")
        }
    }
}

impl Memory {
    fn handle_action(&self, action: Action, none: VmEntry) -> Result<VmEntry> {
        match action {
            Action::None => Ok(none),
            Action::Shutdown => Ok(VmEntry::Shutdown),
            Action::Reset => Ok(VmEntry::Reboot),
            Action::ChangeLayout { callback } => {
                callback.change(self)?;
                Ok(none)
            }
        }
    }

    pub fn handle_mmio(&self, gpa: u64, write: Option<u64>, size: u8) -> Result<VmEntry> {
        let mmio_bus = self.mmio_bus.read();
        if let Some(val) = write {
            let action = mmio_bus.write(gpa, size, val)?;
            drop(mmio_bus);
            self.handle_action(action, VmEntry::None)
        } else {
            let data = mmio_bus.read(gpa, size)?;
            Ok(VmEntry::Mmio { data })
        }
    }

    pub fn handle_io(&self, port: u16, write: Option<u32>, size: u8) -> Result<VmEntry> {
        let io_bus = self.io_bus.read();
        if let Some(val) = write {
            let action = io_bus.write(port as u64, size, val as u64)?;
            drop(io_bus);
            self.handle_action(action, VmEntry::Io { data: None })
        } else {
            let data = io_bus.read(port as u64, size)? as u32;
            Ok(VmEntry::Io { data: Some(data) })
        }
    }
}
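
Note: Memory distributes guest-physical layout changes through the LayoutChanged and LayoutUpdated traits defined above. register_change_callback first replays ram_added for every RAM range already mapped, then stores the callback so later add_region and remove_region calls keep it in sync. The sketch below is illustrative only and is not part of mem.rs; the LogLayout type and its log messages are assumed names, but the trait signatures and register_change_callback call match the code above.

// Hypothetical example (not part of mem.rs): a LayoutChanged implementation
// that logs every RAM slot as it is mapped into or removed from the guest
// physical address space.
#[derive(Debug)]
struct LogLayout;

impl LayoutChanged for LogLayout {
    fn ram_added(&self, gpa: u64, pages: &ArcMemPages) -> Result<()> {
        log::info!("RAM mapped at {gpa:#x}, size {:#x}", pages.size());
        Ok(())
    }

    fn ram_removed(&self, gpa: u64, pages: &ArcMemPages) -> Result<()> {
        log::info!("RAM unmapped at {gpa:#x}, size {:#x}", pages.size());
        Ok(())
    }
}

// Usage, assuming `memory` is a Memory built from some VmMemory backend:
// memory.register_change_callback(Box::new(LogLayout))?;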