• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

google / alioth / 17385686062

01 Sep 2025 07:20PM UTC coverage: 18.016% (-0.1%) from 18.149%
17385686062

Pull #281

github

web-flow
Merge f6f978f6a into 6ec9a6d6b
Pull Request #281: Port to Apple Hypervisor framework

0 of 152 new or added lines in 11 files covered. (0.0%)

1323 existing lines in 30 files now uncovered.

1362 of 7560 relevant lines covered (18.02%)

18.79 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/alioth/src/virtio/pci.rs
1
// Copyright 2024 Google LLC
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14

15
use std::marker::PhantomData;
16
use std::mem::size_of;
17
use std::os::fd::{AsFd, AsRawFd, BorrowedFd};
18
use std::sync::Arc;
19
use std::sync::atomic::{AtomicU16, Ordering};
20
use std::sync::mpsc::Sender;
21

22
use alioth_macros::Layout;
23
use parking_lot::{Mutex, RwLock};
24
use zerocopy::{FromZeros, Immutable, IntoBytes};
25

26
use crate::hv::{IoeventFd, IoeventFdRegistry, IrqFd, MsiSender};
27
use crate::mem::emulated::{Action, Mmio};
28
use crate::mem::{MemRange, MemRegion, MemRegionCallback, MemRegionEntry};
29
use crate::pci::cap::{
30
    MsixCap, MsixCapMmio, MsixCapOffset, MsixMsgCtrl, MsixTableEntry, MsixTableMmio,
31
    MsixTableMmioEntry, PciCap, PciCapHdr, PciCapId, PciCapList,
32
};
33
use crate::pci::config::{
34
    BAR_MEM32, BAR_MEM64, BAR_PREFETCHABLE, CommonHeader, DeviceHeader, EmulatedConfig, HeaderType,
35
    PciConfig, PciConfigArea,
36
};
37
use crate::pci::{self, Pci, PciBar};
38
use crate::utils::{get_atomic_high32, get_atomic_low32, set_atomic_high32, set_atomic_low32};
39
use crate::virtio::dev::{Register, StartParam, VirtioDevice, WakeEvent};
40
use crate::virtio::queue::QueueReg;
41
use crate::virtio::worker::Waker;
42
use crate::virtio::{DevStatus, DeviceId, IrqSender, Result, error};
43
use crate::{impl_mmio_for_zerocopy, mem};
44

45
const VIRTIO_MSI_NO_VECTOR: u16 = 0xffff;
46

47
/// MSI-X vector numbers assigned by the guest driver via the common
/// configuration registers.
#[derive(Debug)]
struct VirtioPciMsixVector {
    // Vector for configuration-change interrupts;
    // VIRTIO_MSI_NO_VECTOR (0xffff) when unassigned.
    config: AtomicU16,
    // Per-queue vectors, indexed by queue number.
    queues: Vec<AtomicU16>,
}
52

53
/// Delivers virtio interrupts to the guest as MSI-X messages.
#[derive(Debug)]
pub struct PciIrqSender<S>
where
    S: MsiSender,
{
    // Vectors the driver selected through the common config registers.
    msix_vector: VirtioPciMsixVector,
    // Emulated MSI-X table holding per-vector address/data/mask state.
    msix_table: Arc<MsixTableMmio<S::IrqFd>>,
    // Hypervisor backend that injects the MSI message.
    msi_sender: S,
}
62

63
impl<S> PciIrqSender<S>
64
where
65
    S: MsiSender,
66
{
UNCOV
67
    fn send(&self, vector: u16) {
×
68
        let entries = self.msix_table.entries.read();
×
69
        let Some(entry) = entries.get(vector as usize) else {
×
70
            log::error!("invalid config vector: {vector:x}");
×
71
            return;
×
72
        };
UNCOV
73
        if entry.get_masked() {
×
74
            log::info!("{vector} is masked");
×
75
            return;
×
76
        }
UNCOV
77
        let data = entry.get_data();
×
78
        let addr = ((entry.get_addr_hi() as u64) << 32) | (entry.get_addr_lo() as u64);
×
79
        if let Err(e) = self.msi_sender.send(addr, data) {
×
80
            log::error!("send msi data = {data:#x} to {addr:#x}: {e}")
×
81
        } else {
UNCOV
82
            log::trace!("send msi data = {data:#x} to {addr:#x}: done")
×
83
        }
84
    }
85

UNCOV
86
    fn get_irqfd<F, T>(&self, vector: u16, f: F) -> Result<T>
×
87
    where
88
        F: FnOnce(BorrowedFd) -> Result<T>,
89
    {
UNCOV
90
        let mut entries = self.msix_table.entries.write();
×
91
        let Some(entry) = entries.get_mut(vector as usize) else {
×
92
            return error::InvalidMsixVector { vector }.fail();
×
93
        };
UNCOV
94
        match &*entry {
×
95
            MsixTableMmioEntry::Entry(e) => {
×
96
                let irqfd = self.msi_sender.create_irqfd()?;
×
97
                irqfd.set_addr_hi(e.addr_hi)?;
×
98
                irqfd.set_addr_lo(e.addr_lo)?;
×
99
                irqfd.set_data(e.data)?;
×
100
                irqfd.set_masked(e.control.masked())?;
×
101
                let r = f(irqfd.as_fd())?;
×
102
                *entry = MsixTableMmioEntry::IrqFd(irqfd);
×
103
                Ok(r)
×
104
            }
UNCOV
105
            MsixTableMmioEntry::IrqFd(fd) => f(fd.as_fd()),
×
106
        }
107
    }
108
}
109

110
impl<S> IrqSender for PciIrqSender<S>
111
where
112
    S: MsiSender,
113
{
UNCOV
114
    fn config_irq(&self) {
×
115
        let vector = self.msix_vector.config.load(Ordering::Acquire);
×
116
        if vector != VIRTIO_MSI_NO_VECTOR {
×
117
            self.send(vector)
×
118
        }
119
    }
120

UNCOV
121
    fn queue_irq(&self, idx: u16) {
×
122
        let Some(vector) = self.msix_vector.queues.get(idx as usize) else {
×
123
            log::error!("invalid queue index: {idx}");
×
124
            return;
×
125
        };
UNCOV
126
        let vector = vector.load(Ordering::Acquire);
×
127
        if vector != VIRTIO_MSI_NO_VECTOR {
×
128
            self.send(vector);
×
129
        }
130
    }
131

UNCOV
132
    fn config_irqfd<F, T>(&self, f: F) -> Result<T>
133
    where
134
        F: FnOnce(BorrowedFd) -> Result<T>,
135
    {
UNCOV
136
        self.get_irqfd(self.msix_vector.config.load(Ordering::Acquire), f)
×
137
    }
138

UNCOV
139
    fn queue_irqfd<F, T>(&self, idx: u16, f: F) -> Result<T>
×
140
    where
141
        F: FnOnce(BorrowedFd) -> Result<T>,
142
    {
UNCOV
143
        let Some(vector) = self.msix_vector.queues.get(idx as usize) else {
×
144
            return error::InvalidQueueIndex { index: idx }.fail();
×
145
        };
UNCOV
146
        self.get_irqfd(vector.load(Ordering::Acquire), f)
×
147
    }
148
}
149

150
/// Virtio common configuration registers as defined by the PCI transport
/// (virtio spec "common configuration structure"). Field order and widths
/// define the guest-visible register layout; the derived `Layout` exposes
/// per-field offsets used by the MMIO read/write handlers below.
#[repr(C, align(4))]
#[derive(Layout)]
pub struct VirtioCommonCfg {
    device_feature_select: u32,
    device_feature: u32,
    driver_feature_select: u32,
    driver_feature: u32,
    config_msix_vector: u16,
    num_queues: u16,
    device_status: u8,
    config_generation: u8,
    // Fields below operate on the queue selected by `queue_select`.
    queue_select: u16,
    queue_size: u16,
    queue_msix_vector: u16,
    queue_enable: u16,
    queue_notify_off: u16,
    queue_desc_lo: u32,
    queue_desc_hi: u32,
    queue_driver_lo: u32,
    queue_driver_hi: u32,
    queue_device_lo: u32,
    queue_device_hi: u32,
    queue_notify_data: u16,
    queue_reset: u16,
}
175

176
/// Layout of the whole virtio register region inside the BAR: the common
/// configuration block, the ISR status word, and a variable-length run of
/// per-queue notify registers (one u32 per queue).
#[derive(Layout)]
#[repr(C, align(4))]
pub struct VirtioPciRegister {
    common: VirtioCommonCfg,
    isr_status: u32,
    // Marker only: the notify array's length depends on the queue count,
    // so it is not materialized in the struct (see `Mmio::size`).
    queue_notify: PhantomData<[u32]>,
}
183

184
/// MMIO handler backing the virtio register region of the PCI BAR.
#[derive(Debug)]
pub struct VirtioPciRegisterMmio<M, E>
where
    M: MsiSender,
    E: IoeventFd,
{
    // Device name, used as a prefix in log messages.
    name: Arc<str>,
    // Shared register state (feature words, status, selectors).
    reg: Register,
    // Per-queue registers shared with the device worker.
    queues: Arc<[QueueReg]>,
    // MSI-X based interrupt delivery to the guest.
    irq_sender: Arc<PciIrqSender<M>>,
    // Optional per-queue ioeventfds handed to the worker on start.
    ioeventfds: Option<Arc<[E]>>,
    // Channel carrying wake events to the device worker.
    event_tx: Sender<WakeEvent<PciIrqSender<M>, E>>,
    // Wakes the worker so it drains `event_tx`.
    waker: Arc<Waker>,
}
198

199
impl<M, E> VirtioPciRegisterMmio<M, E>
200
where
201
    M: MsiSender,
202
    E: IoeventFd,
203
{
UNCOV
204
    fn wake_up_dev(&self, event: WakeEvent<PciIrqSender<M>, E>) {
×
205
        let is_start = matches!(event, WakeEvent::Start { .. });
×
206
        if let Err(e) = self.event_tx.send(event) {
×
207
            log::error!("{}: failed to send event: {e}", self.name);
×
208
            return;
×
209
        }
UNCOV
210
        if is_start {
×
211
            return;
×
212
        }
UNCOV
213
        if let Err(e) = self.waker.wake() {
×
214
            log::error!("{}: failed to wake up device: {e}", self.name);
×
215
        }
216
    }
217

UNCOV
218
    fn reset(&self) {
×
219
        let config_msix = &self.irq_sender.msix_vector.config;
×
220
        config_msix.store(VIRTIO_MSI_NO_VECTOR, Ordering::Release);
×
221
        for q_vector in self.irq_sender.msix_vector.queues.iter() {
×
222
            q_vector.store(VIRTIO_MSI_NO_VECTOR, Ordering::Release);
×
223
        }
UNCOV
224
        self.irq_sender.msix_table.reset();
×
225
        for q in self.queues.iter() {
×
226
            q.enabled.store(false, Ordering::Release);
×
227
        }
228
    }
229

UNCOV
230
    fn msix_change_allowed(&self, old: u16) -> bool {
×
231
        let entries = self.irq_sender.msix_table.entries.read();
×
232
        let Some(entry) = entries.get(old as usize) else {
×
233
            return true;
×
234
        };
UNCOV
235
        if let MsixTableMmioEntry::IrqFd(fd) = entry {
×
236
            log::error!(
×
237
                "{}: MSI-X vector {old:#x} was assigned to irqfd {:#x}",
×
238
                self.name,
×
239
                fd.as_fd().as_raw_fd(),
×
240
            );
UNCOV
241
            false
×
242
        } else {
UNCOV
243
            true
×
244
        }
245
    }
246
}
247

248
impl<M, E> Mmio for VirtioPciRegisterMmio<M, E>
249
where
250
    M: MsiSender,
251
    E: IoeventFd,
252
{
UNCOV
253
    fn size(&self) -> u64 {
×
254
        (size_of::<VirtioPciRegister>() + size_of::<u32>() * self.queues.len()) as u64
×
255
    }
256

UNCOV
257
    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
×
258
        let reg = &self.reg;
×
259
        let ret = match (offset as usize, size as usize) {
×
260
            VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {
×
261
                reg.device_feature_sel.load(Ordering::Acquire) as u64
×
262
            }
UNCOV
263
            VirtioCommonCfg::LAYOUT_DEVICE_FEATURE => {
×
264
                let sel = reg.device_feature_sel.load(Ordering::Acquire);
×
265
                if let Some(feature) = reg.device_feature.get(sel as usize) {
×
266
                    *feature as u64
×
267
                } else {
UNCOV
268
                    0
×
269
                }
270
            }
UNCOV
271
            VirtioCommonCfg::LAYOUT_DRIVER_FEATURE_SELECT => {
×
272
                reg.driver_feature_sel.load(Ordering::Acquire) as u64
×
273
            }
UNCOV
274
            VirtioCommonCfg::LAYOUT_DRIVER_FEATURE => {
×
275
                let sel = reg.driver_feature_sel.load(Ordering::Acquire);
×
276
                if let Some(feature) = reg.driver_feature.get(sel as usize) {
×
277
                    feature.load(Ordering::Acquire) as u64
×
278
                } else {
UNCOV
279
                    0
×
280
                }
281
            }
UNCOV
282
            VirtioCommonCfg::LAYOUT_CONFIG_MSIX_VECTOR => {
×
283
                self.irq_sender.msix_vector.config.load(Ordering::Acquire) as u64
×
284
            }
UNCOV
285
            VirtioCommonCfg::LAYOUT_NUM_QUEUES => self.queues.len() as u64,
×
286
            VirtioCommonCfg::LAYOUT_DEVICE_STATUS => reg.status.load(Ordering::Acquire) as u64,
×
287
            VirtioCommonCfg::LAYOUT_CONFIG_GENERATION => {
×
288
                0 // TODO: support device config change at runtime
×
289
            }
UNCOV
290
            VirtioCommonCfg::LAYOUT_QUEUE_SELECT => reg.queue_sel.load(Ordering::Acquire) as u64,
×
291
            VirtioCommonCfg::LAYOUT_QUEUE_SIZE => {
×
292
                let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;
×
293
                if let Some(q) = self.queues.get(q_sel) {
×
294
                    q.size.load(Ordering::Acquire) as u64
×
295
                } else {
UNCOV
296
                    0
×
297
                }
298
            }
UNCOV
299
            VirtioCommonCfg::LAYOUT_QUEUE_MSIX_VECTOR => {
×
300
                let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;
×
301
                if let Some(msix_vector) = self.irq_sender.msix_vector.queues.get(q_sel) {
×
302
                    msix_vector.load(Ordering::Acquire) as u64
×
303
                } else {
UNCOV
304
                    VIRTIO_MSI_NO_VECTOR as u64
×
305
                }
306
            }
UNCOV
307
            VirtioCommonCfg::LAYOUT_QUEUE_ENABLE => {
×
308
                let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;
×
309
                if let Some(q) = self.queues.get(q_sel) {
×
310
                    q.enabled.load(Ordering::Acquire) as u64
×
311
                } else {
UNCOV
312
                    0
×
313
                }
314
            }
UNCOV
315
            VirtioCommonCfg::LAYOUT_QUEUE_NOTIFY_OFF => {
×
316
                reg.queue_sel.load(Ordering::Acquire) as u64
×
317
            }
UNCOV
318
            VirtioCommonCfg::LAYOUT_QUEUE_DESC_LO => {
×
319
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
320
                if let Some(q) = self.queues.get(q_sel as usize) {
×
321
                    get_atomic_low32(&q.desc) as u64
×
322
                } else {
UNCOV
323
                    0
×
324
                }
325
            }
UNCOV
326
            VirtioCommonCfg::LAYOUT_QUEUE_DESC_HI => {
×
327
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
328
                if let Some(q) = self.queues.get(q_sel as usize) {
×
329
                    get_atomic_high32(&q.desc) as u64
×
330
                } else {
UNCOV
331
                    0
×
332
                }
333
            }
UNCOV
334
            VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_LO => {
×
335
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
336
                if let Some(q) = self.queues.get(q_sel as usize) {
×
337
                    get_atomic_high32(&q.driver) as u64
×
338
                } else {
UNCOV
339
                    0
×
340
                }
341
            }
UNCOV
342
            VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_HI => {
×
343
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
344
                if let Some(q) = self.queues.get(q_sel as usize) {
×
345
                    get_atomic_high32(&q.driver) as u64
×
346
                } else {
UNCOV
347
                    0
×
348
                }
349
            }
UNCOV
350
            VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_LO => {
×
351
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
352
                if let Some(q) = self.queues.get(q_sel as usize) {
×
353
                    get_atomic_high32(&q.device) as u64
×
354
                } else {
UNCOV
355
                    0
×
356
                }
357
            }
UNCOV
358
            VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_HI => {
×
359
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
360
                if let Some(q) = self.queues.get(q_sel as usize) {
×
361
                    get_atomic_high32(&q.device) as u64
×
362
                } else {
UNCOV
363
                    0
×
364
                }
365
            }
UNCOV
366
            VirtioCommonCfg::LAYOUT_QUEUE_NOTIFY_DATA => {
×
367
                todo!()
368
            }
UNCOV
369
            VirtioCommonCfg::LAYOUT_QUEUE_RESET => {
×
370
                todo!()
371
            }
UNCOV
372
            _ => {
×
373
                log::error!(
×
374
                    "{}: read invalid register: offset = {offset:#x}, size = {size}",
×
375
                    self.name
×
376
                );
UNCOV
377
                0
×
378
            }
379
        };
UNCOV
380
        Ok(ret)
×
381
    }
382

UNCOV
383
    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<Action> {
×
384
        let reg = &self.reg;
×
385
        match (offset as usize, size as usize) {
×
386
            VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {
×
387
                reg.device_feature_sel.store(val as u8, Ordering::Release);
×
388
            }
UNCOV
389
            VirtioCommonCfg::LAYOUT_DRIVER_FEATURE_SELECT => {
×
390
                reg.driver_feature_sel.store(val as u8, Ordering::Release);
×
391
            }
UNCOV
392
            VirtioCommonCfg::LAYOUT_DRIVER_FEATURE => {
×
393
                let sel = reg.driver_feature_sel.load(Ordering::Acquire);
×
394
                if let Some(feature) = reg.driver_feature.get(sel as usize) {
×
395
                    feature.store(val as u32, Ordering::Release);
×
396
                } else if val != 0 {
×
397
                    log::error!("{}: unknown feature {val:#x} for sel {sel}", self.name);
×
398
                }
399
            }
UNCOV
400
            VirtioCommonCfg::LAYOUT_CONFIG_MSIX_VECTOR => {
×
401
                let config_msix = &self.irq_sender.msix_vector.config;
×
402
                let old = config_msix.load(Ordering::Acquire);
×
403
                if self.msix_change_allowed(old) {
×
404
                    config_msix.store(val as u16, Ordering::Release);
×
405
                    log::trace!(
×
406
                        "{}: config MSI-X vector update: {old:#x} -> {val:#x}",
×
407
                        self.name
×
408
                    );
409
                } else {
UNCOV
410
                    log::error!(
×
411
                        "{}: cannot change config MSI-X vector from {old:#x} to {val:#x}",
UNCOV
412
                        self.name
×
413
                    )
414
                }
415
            }
UNCOV
416
            VirtioCommonCfg::LAYOUT_DEVICE_STATUS => {
×
417
                let status = DevStatus::from_bits_truncate(val as u8);
×
418
                let old = reg.status.swap(status.bits(), Ordering::AcqRel);
×
419
                let old = DevStatus::from_bits_retain(old);
×
420
                if (old ^ status).contains(DevStatus::DRIVER_OK) {
×
421
                    let event = if status.contains(DevStatus::DRIVER_OK) {
×
422
                        let mut feature = 0;
×
423
                        for (i, v) in reg.driver_feature.iter().enumerate() {
×
424
                            feature |= (v.load(Ordering::Acquire) as u128) << (i << 5);
×
425
                        }
426
                        let param = StartParam {
427
                            feature,
UNCOV
428
                            irq_sender: self.irq_sender.clone(),
×
429
                            ioeventfds: self.ioeventfds.clone(),
×
430
                        };
431
                        WakeEvent::Start { param }
432
                    } else {
UNCOV
433
                        self.reset();
×
434
                        WakeEvent::Reset
×
435
                    };
UNCOV
436
                    self.wake_up_dev(event);
×
437
                }
438
            }
UNCOV
439
            VirtioCommonCfg::LAYOUT_QUEUE_SELECT => {
×
440
                reg.queue_sel.store(val as u16, Ordering::Relaxed);
×
441
                if self.queues.get(val as usize).is_none() {
×
442
                    log::error!("{}: unknown queue index {val}", self.name)
×
443
                }
444
            }
UNCOV
445
            VirtioCommonCfg::LAYOUT_QUEUE_SIZE => {
×
446
                let q_sel = reg.queue_sel.load(Ordering::Relaxed) as usize;
×
447
                if let Some(q) = self.queues.get(q_sel) {
×
448
                    // TODO: validate queue size
UNCOV
449
                    q.size.store(val as u16, Ordering::Release);
×
450
                }
451
            }
UNCOV
452
            VirtioCommonCfg::LAYOUT_QUEUE_MSIX_VECTOR => {
×
453
                let q_sel = reg.queue_sel.load(Ordering::Relaxed) as usize;
×
454
                if let Some(msix_vector) = self.irq_sender.msix_vector.queues.get(q_sel) {
×
455
                    let old = msix_vector.load(Ordering::Acquire);
×
456
                    if self.msix_change_allowed(old) {
×
457
                        msix_vector.store(val as u16, Ordering::Release);
×
458
                        log::trace!(
×
459
                            "{}: queue {q_sel} MSI-X vector update: {old:#x} -> {val:#x}",
×
460
                            self.name
×
461
                        );
462
                    } else {
UNCOV
463
                        log::error!(
×
464
                            "{}: cannot change queue {q_sel} MSI-X vector from {old:#x} to {val:#x}",
UNCOV
465
                            self.name
×
466
                        )
467
                    }
468
                }
469
            }
UNCOV
470
            VirtioCommonCfg::LAYOUT_QUEUE_ENABLE => {
×
471
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
472
                if let Some(q) = self.queues.get(q_sel as usize) {
×
473
                    q.enabled.store(val != 0, Ordering::Release);
×
474
                };
475
            }
UNCOV
476
            VirtioCommonCfg::LAYOUT_QUEUE_DESC_LO => {
×
477
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
478
                if let Some(q) = self.queues.get(q_sel as usize) {
×
479
                    set_atomic_low32(&q.desc, val as u32)
×
480
                }
481
            }
UNCOV
482
            VirtioCommonCfg::LAYOUT_QUEUE_DESC_HI => {
×
483
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
484
                if let Some(q) = self.queues.get(q_sel as usize) {
×
485
                    set_atomic_high32(&q.desc, val as u32)
×
486
                }
487
            }
UNCOV
488
            VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_LO => {
×
489
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
490
                if let Some(q) = self.queues.get(q_sel as usize) {
×
491
                    set_atomic_low32(&q.driver, val as u32)
×
492
                }
493
            }
UNCOV
494
            VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_HI => {
×
495
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
496
                if let Some(q) = self.queues.get(q_sel as usize) {
×
497
                    set_atomic_high32(&q.driver, val as u32)
×
498
                }
499
            }
UNCOV
500
            VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_LO => {
×
501
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
502
                if let Some(q) = self.queues.get(q_sel as usize) {
×
503
                    set_atomic_low32(&q.device, val as u32)
×
504
                }
505
            }
UNCOV
506
            VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_HI => {
×
507
                let q_sel = reg.queue_sel.load(Ordering::Relaxed);
×
508
                if let Some(q) = self.queues.get(q_sel as usize) {
×
509
                    set_atomic_high32(&q.device, val as u32)
×
510
                }
511
            }
UNCOV
512
            VirtioCommonCfg::LAYOUT_QUEUE_RESET => {
×
513
                todo!()
514
            }
UNCOV
515
            (offset, _)
×
516
                if offset >= VirtioPciRegister::OFFSET_QUEUE_NOTIFY
×
517
                    && offset
×
518
                        < VirtioPciRegister::OFFSET_QUEUE_NOTIFY
×
519
                            + size_of::<u32>() * self.queues.len() =>
×
520
            {
UNCOV
521
                let q_index = (offset - VirtioPciRegister::OFFSET_QUEUE_NOTIFY) as u16 / 4;
×
522
                log::warn!("{}: notifying queue-{q_index} by vm exit!", self.name);
×
NEW
523
                let event = WakeEvent::Notify { q_index };
×
NEW
524
                self.wake_up_dev(event)
×
525
            }
526
            _ => {
×
527
                log::error!(
×
UNCOV
528
                    "{}: write 0x{val:0width$x} to invalid register offset = {offset:#x}",
×
529
                    self.name,
×
530
                    width = 2 * size as usize
×
531
                );
532
            }
533
        }
UNCOV
534
        Ok(Action::None)
×
535
    }
536
}
537

538
/// Registers/deregisters queue-notify ioeventfds when the memory region
/// holding the virtio registers is mapped or unmapped.
#[derive(Debug)]
struct IoeventFdCallback<R>
where
    R: IoeventFdRegistry,
{
    // Hypervisor registry wiring fds to guest-physical addresses.
    registry: R,
    // One eventfd per queue, indexed by queue number.
    ioeventfds: Arc<[R::IoeventFd]>,
}
546

547
impl<R> MemRegionCallback for IoeventFdCallback<R>
548
where
549
    R: IoeventFdRegistry,
550
{
UNCOV
551
    fn mapped(&self, addr: u64) -> mem::Result<()> {
×
UNCOV
552
        for (q_index, fd) in self.ioeventfds.iter().enumerate() {
×
UNCOV
553
            let base_addr = addr + (12 << 10) + VirtioPciRegister::OFFSET_QUEUE_NOTIFY as u64;
×
554
            let notify_addr = base_addr + (q_index * size_of::<u32>()) as u64;
×
555
            self.registry.register(fd, notify_addr, 0, None)?;
×
556
            log::info!("q-{q_index} ioeventfd registered at {notify_addr:x}",)
×
557
        }
558
        Ok(())
×
559
    }
560

561
    fn unmapped(&self) -> mem::Result<()> {
×
UNCOV
562
        for fd in self.ioeventfds.iter() {
×
UNCOV
563
            self.registry.deregister(fd)?;
×
564
            log::info!("ioeventfd {fd:?} de-registered")
×
565
        }
566
        Ok(())
×
567
    }
568
}
569

570
// PCI vendor ID used by all virtio devices (Red Hat / Qumranet).
const VIRTIO_VENDOR_ID: u16 = 0x1af4;
// Modern virtio PCI device IDs are 0x1040 plus the virtio device id.
const VIRTIO_DEVICE_ID_BASE: u16 = 0x1040;
572

UNCOV
573
fn get_class(id: DeviceId) -> (u8, u8) {
×
UNCOV
574
    match id {
×
UNCOV
575
        DeviceId::Net => (0x02, 0x00),
×
576
        DeviceId::FileSystem => (0x01, 0x80),
×
577
        DeviceId::Block => (0x01, 0x00),
×
578
        DeviceId::Socket => (0x02, 0x80),
×
579
        _ => (0xff, 0x00),
×
580
    }
581
}
582

583
/// `cfg_type` values for virtio PCI capabilities.
// Discriminants mirror the VIRTIO_PCI_CAP_*_CFG constants of the
// virtio spec; 6 and 7 are intentionally unassigned here.
#[repr(u8)]
pub enum VirtioPciCfg {
    Common = 1,
    Notify = 2,
    Isr = 3,
    Device = 4,
    Pci = 5,
    SharedMemory = 8,
    Vendor = 9,
}
593

594
/// Virtio capability in PCI config space: locates a virtio structure
/// inside one of the device's BARs.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciCap {
    header: PciCapHdr,
    // Length of this capability structure in bytes.
    cap_len: u8,
    // One of the `VirtioPciCfg` discriminants.
    cfg_type: u8,
    // Index of the BAR holding the referenced structure.
    bar: u8,
    id: u8,
    padding: [u8; 2],
    // Offset of the structure within the BAR.
    offset: u32,
    // Length of the structure in bytes.
    length: u32,
}
impl_mmio_for_zerocopy!(VirtioPciCap);
607

608
impl PciConfigArea for VirtioPciCap {
    // No runtime-mutable state; device reset is a no-op.
    fn reset(&self) {}
}
611

612
impl PciCap for VirtioPciCap {
    // Links this capability into the config-space capability chain.
    fn set_next(&mut self, val: u8) {
        self.header.next = val
    }
}
617

618
/// Virtio capability with 64-bit offset/length extensions, for
/// structures that may not fit the 32-bit fields of `VirtioPciCap`.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciCap64 {
    cap: VirtioPciCap,
    // High 32 bits extending `cap.offset`.
    offset_hi: u32,
    // High 32 bits extending `cap.length`.
    length_hi: u32,
}
impl_mmio_for_zerocopy!(VirtioPciCap64);
626

627
impl PciConfigArea for VirtioPciCap64 {
    // No runtime-mutable state; device reset is a no-op.
    fn reset(&self) {}
}
630

631
impl PciCap for VirtioPciCap64 {
    fn set_next(&mut self, val: u8) {
        // Delegate to the embedded base capability.
        PciCap::set_next(&mut self.cap, val)
    }
}
636

637
/// Notify capability: a `VirtioPciCap` plus the per-queue notify offset
/// multiplier.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    // Stride between consecutive queues' notify registers.
    multiplier: u32,
}
impl_mmio_for_zerocopy!(VirtioPciNotifyCap);
644

645
impl PciConfigArea for VirtioPciNotifyCap {
    // No runtime-mutable state; device reset is a no-op.
    fn reset(&self) {}
}
648

649
impl PciCap for VirtioPciNotifyCap {
UNCOV
650
    fn set_next(&mut self, val: u8) {
×
UNCOV
651
        self.cap.header.next = val;
×
652
    }
653
}
654

655
#[derive(Debug)]
656
pub struct VirtioPciDevice<M, E>
657
where
658
    M: MsiSender,
659
    E: IoeventFd,
660
{
661
    pub dev: VirtioDevice<PciIrqSender<M>, E>,
662
    pub config: EmulatedConfig,
663
    pub registers: Arc<VirtioPciRegisterMmio<M, E>>,
664
}
665

666
impl<M, E> VirtioPciDevice<M, E>
667
where
668
    M: MsiSender,
669
    E: IoeventFd,
670
{
UNCOV
671
    pub fn new<R>(
×
672
        dev: VirtioDevice<PciIrqSender<M>, E>,
673
        msi_sender: M,
674
        ioeventfd_reg: R,
675
    ) -> Result<Self>
676
    where
677
        R: IoeventFdRegistry<IoeventFd = E>,
678
    {
UNCOV
679
        let (class, subclass) = get_class(dev.id);
×
680
        let mut header = DeviceHeader {
UNCOV
681
            common: CommonHeader {
×
682
                vendor: VIRTIO_VENDOR_ID,
683
                device: VIRTIO_DEVICE_ID_BASE + dev.id as u16,
684
                revision: 0x1,
685
                header_type: HeaderType::Device as u8,
686
                class,
687
                subclass,
688
                ..Default::default()
689
            },
UNCOV
690
            subsystem: VIRTIO_DEVICE_ID_BASE + dev.id as u16,
×
691
            ..Default::default()
692
        };
693
        let device_config = dev.device_config.clone();
×
UNCOV
694
        let num_queues = dev.queue_regs.len();
×
UNCOV
695
        let table_entries = num_queues + 1;
×
696

697
        let msix_table_offset = 0;
×
698
        let msix_table_size = size_of::<MsixTableEntry>() * table_entries;
×
699

700
        let msix_pba_offset = 8 << 10;
×
701

UNCOV
702
        let virtio_register_offset = 12 << 10;
×
703
        let device_config_offset =
×
UNCOV
704
            virtio_register_offset + size_of::<VirtioPciRegister>() + size_of::<u32>() * num_queues;
×
705

706
        let msix_msg_ctrl = MsixMsgCtrl::new(table_entries as u16);
×
707

708
        let cap_msix = MsixCap {
709
            header: PciCapHdr {
×
710
                id: PciCapId::Msix as u8,
711
                ..Default::default()
712
            },
713
            control: msix_msg_ctrl,
UNCOV
714
            table_offset: MsixCapOffset(msix_table_offset as u32),
×
UNCOV
715
            pba_offset: MsixCapOffset(msix_pba_offset as u32),
×
716
        };
717
        let cap_common = VirtioPciCap {
718
            header: PciCapHdr {
×
719
                id: PciCapId::Vendor as u8,
720
                ..Default::default()
721
            },
UNCOV
722
            cap_len: size_of::<VirtioPciCap>() as u8,
×
UNCOV
723
            cfg_type: VirtioPciCfg::Common as u8,
×
724
            bar: 0,
725
            id: 0,
726
            offset: (virtio_register_offset + VirtioPciRegister::OFFSET_COMMON) as u32,
×
UNCOV
727
            length: size_of::<VirtioCommonCfg>() as u32,
×
728
            ..Default::default()
729
        };
730
        let cap_isr = VirtioPciCap {
UNCOV
731
            header: PciCapHdr {
×
732
                id: PciCapId::Vendor as u8,
733
                ..Default::default()
734
            },
UNCOV
735
            cap_len: size_of::<VirtioPciCap>() as u8,
×
UNCOV
736
            cfg_type: VirtioPciCfg::Isr as u8,
×
737
            bar: 0,
738
            id: 0,
739
            offset: (virtio_register_offset + VirtioPciRegister::OFFSET_ISR_STATUS) as u32,
×
UNCOV
740
            length: size_of::<u32>() as u32,
×
741
            ..Default::default()
742
        };
743
        let cap_notify = VirtioPciNotifyCap {
UNCOV
744
            cap: VirtioPciCap {
×
745
                header: PciCapHdr {
746
                    id: PciCapId::Vendor as u8,
747
                    ..Default::default()
748
                },
749
                cap_len: size_of::<VirtioPciNotifyCap>() as u8,
750
                cfg_type: VirtioPciCfg::Notify as u8,
751
                bar: 0,
752
                id: 0,
753
                offset: (virtio_register_offset + VirtioPciRegister::OFFSET_QUEUE_NOTIFY) as u32,
754
                length: (size_of::<u32>() * num_queues) as u32,
755
                ..Default::default()
756
            },
UNCOV
757
            multiplier: size_of::<u32>() as u32,
×
758
        };
759
        let cap_device_config = VirtioPciCap {
760
            header: PciCapHdr {
×
761
                id: PciCapId::Vendor as u8,
762
                ..Default::default()
763
            },
UNCOV
764
            cap_len: size_of::<VirtioPciCap>() as u8,
×
UNCOV
765
            cfg_type: VirtioPciCfg::Device as u8,
×
766
            bar: 0,
767
            id: 0,
768
            offset: device_config_offset as u32,
×
UNCOV
769
            length: device_config.size() as u32,
×
770
            ..Default::default()
771
        };
772
        let entries = RwLock::new(
UNCOV
773
            (0..table_entries)
×
UNCOV
774
                .map(|_| MsixTableMmioEntry::Entry(MsixTableEntry::default()))
×
UNCOV
775
                .collect(),
×
776
        );
777
        let msix_table = Arc::new(MsixTableMmio { entries });
×
778
        let bar0_size = 16 << 10;
×
779
        let mut bar0 = MemRegion {
780
            ranges: vec![],
×
781
            entries: vec![MemRegionEntry {
×
782
                size: bar0_size,
783
                type_: mem::MemRegionType::Hidden,
784
            }],
UNCOV
785
            callbacks: Mutex::new(vec![]),
×
786
        };
787

788
        let mut caps: Vec<Box<(dyn PciCap)>> = vec![
×
UNCOV
789
            Box::new(MsixCapMmio {
×
UNCOV
790
                cap: RwLock::new(cap_msix),
×
791
            }),
792
            Box::new(cap_common),
×
793
            Box::new(cap_isr),
×
UNCOV
794
            Box::new(cap_notify),
×
795
        ];
796
        if device_config.size() > 0 {
×
797
            caps.push(Box::new(cap_device_config));
×
798
        }
799
        if let Some(region) = &dev.shared_mem_regions {
×
800
            let mut offset = 0;
×
UNCOV
801
            for (index, entry) in region.entries.iter().enumerate() {
×
802
                let share_mem_cap = VirtioPciCap64 {
803
                    cap: VirtioPciCap {
×
804
                        header: PciCapHdr {
805
                            id: PciCapId::Vendor as u8,
806
                            ..Default::default()
807
                        },
808
                        cap_len: size_of::<VirtioPciCap64>() as u8,
809
                        cfg_type: VirtioPciCfg::SharedMemory as u8,
810
                        bar: 2,
811
                        id: index as u8,
812
                        offset: offset as u32,
813
                        length: entry.size as u32,
814
                        ..Default::default()
815
                    },
UNCOV
816
                    length_hi: (entry.size >> 32) as u32,
×
UNCOV
817
                    offset_hi: (offset >> 32) as u32,
×
818
                };
819
                caps.push(Box::new(share_mem_cap));
×
820
                offset += entry.size;
×
821
            }
822
        }
823

UNCOV
824
        let cap_list = PciCapList::try_from(caps)?;
×
825

826
        let msix_vector = VirtioPciMsixVector {
827
            config: AtomicU16::new(VIRTIO_MSI_NO_VECTOR),
×
UNCOV
828
            queues: (0..num_queues)
×
829
                .map(|_| AtomicU16::new(VIRTIO_MSI_NO_VECTOR))
830
                .collect(),
831
        };
832

UNCOV
833
        let maybe_ioeventfds = (0..num_queues)
×
UNCOV
834
            .map(|_| ioeventfd_reg.create())
×
835
            .collect::<Result<Arc<_>, _>>();
836
        let ioeventfds = match maybe_ioeventfds {
×
837
            Ok(fds) => Some(fds),
×
UNCOV
838
            Err(e) => {
×
839
                log::warn!("{}: failed to create ioeventfds: {e:?}", dev.name);
×
840
                None
×
841
            }
842
        };
843

844
        let mut device_feature = [0u32; 4];
×
845
        for (i, v) in device_feature.iter_mut().enumerate() {
×
846
            *v = (dev.device_feature >> (i << 5)) as u32;
×
847
        }
UNCOV
848
        let registers = Arc::new(VirtioPciRegisterMmio {
×
UNCOV
849
            name: dev.name.clone(),
×
850
            reg: Register {
×
851
                device_feature,
×
852
                ..Default::default()
×
853
            },
854
            event_tx: dev.event_tx.clone(),
×
855
            waker: dev.waker.clone(),
×
856
            queues: dev.queue_regs.clone(),
×
857
            irq_sender: Arc::new(PciIrqSender {
×
858
                msix_vector,
×
UNCOV
859
                msix_table: msix_table.clone(),
×
860
                msi_sender,
×
861
            }),
862
            ioeventfds: ioeventfds.clone(),
×
863
        });
864
        bar0.ranges.push(MemRange::Emulated(msix_table));
×
865
        bar0.ranges
×
866
            .push(MemRange::Span((12 << 10) - msix_table_size as u64));
×
UNCOV
867
        bar0.ranges.push(MemRange::Emulated(registers.clone()));
×
868
        if let Some(ioeventfds) = ioeventfds {
×
UNCOV
869
            bar0.callbacks.lock().push(Box::new(IoeventFdCallback {
×
870
                registry: ioeventfd_reg,
×
871
                ioeventfds,
×
872
            }));
873
        }
874
        if device_config.size() > 0 {
×
875
            bar0.ranges.push(MemRange::Emulated(device_config))
×
876
        }
877
        let mut bars = [const { PciBar::Empty }; 6];
×
UNCOV
878
        let mut bar_masks = [0; 6];
×
UNCOV
879
        let bar0_mask = !(bar0_size - 1);
×
880
        bar_masks[0] = bar0_mask as u32;
×
881
        bars[0] = PciBar::Mem(Arc::new(bar0));
×
UNCOV
882
        header.bars[0] = BAR_MEM32;
×
883

884
        if let Some(region) = &dev.shared_mem_regions {
×
885
            let region_size = region.size();
×
886
            let bar2_mask = !(region_size.next_power_of_two() - 1);
×
887
            bar_masks[2] = bar2_mask as u32;
×
888
            let mut not_emulated = |r| !matches!(r, &MemRange::Emulated(_));
×
UNCOV
889
            let prefetchable = region.ranges.iter().all(&mut not_emulated);
×
890
            if prefetchable {
×
891
                bar_masks[3] = (bar2_mask >> 32) as u32;
×
892
                bars[2] = PciBar::Mem(region.clone());
×
893
                header.bars[2] = BAR_MEM64 | BAR_PREFETCHABLE;
×
894
            } else {
895
                assert!(region_size <= u32::MAX as u64);
×
896
                bars[2] = PciBar::Mem(region.clone());
×
897
                header.bars[2] = BAR_MEM32;
×
898
            }
899
        }
900

901
        let config = EmulatedConfig::new_device(header, bar_masks, bars, cap_list);
×
902

903
        Ok(VirtioPciDevice {
×
UNCOV
904
            dev,
×
UNCOV
905
            config,
×
UNCOV
906
            registers,
×
907
        })
908
    }
909
}
910

911
impl<M, E> Pci for VirtioPciDevice<M, E>
912
where
913
    M: MsiSender,
914
    E: IoeventFd,
915
{
UNCOV
916
    fn config(&self) -> &dyn PciConfig {
×
UNCOV
917
        &self.config
×
918
    }
919

UNCOV
920
    fn reset(&self) -> pci::Result<()> {
×
UNCOV
921
        self.registers.wake_up_dev(WakeEvent::Reset);
×
922
        self.registers.reset();
×
923
        self.registers.reg.status.store(0, Ordering::Release);
×
UNCOV
924
        Ok(())
×
925
    }
926
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc