google / alioth / 17119523075

21 Aug 2025 01:27AM UTC coverage: 11.963% (+1.6%) from 10.411%
push · github · Lencerf

feat(virtio): support packed queue

Signed-off-by: Changyuan Lyu <changyuanl@google.com>

91 of 132 new or added lines in 5 files covered (68.94%).
155 existing lines in 8 files now uncovered.
838 of 7005 relevant lines covered (11.96%).
17.32 hits per line.
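These headline figures are consistent with one another; as a quick check (assuming the reported percentage is simply covered lines divided by relevant lines), a small Rust snippet reproduces them. The pct helper below is purely illustrative and not part of the report or the repository:

// Sanity-check the report's arithmetic, assuming coverage = covered / relevant.
fn pct(covered: u32, total: u32) -> f64 {
    covered as f64 / total as f64 * 100.0
}

fn main() {
    println!("{:.3}%", pct(838, 7005)); // 11.963%, the overall figure above
    println!("{:.2}%", pct(91, 132));   // 68.94%, for the new or added lines
}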

Source File

/alioth/src/virtio/queue/split.rs (71.77% covered)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[path = "split_test.rs"]
#[cfg(test)]
mod tests;

use std::mem::size_of;
use std::sync::atomic::{Ordering, fence};

use alioth_macros::Layout;
use bitflags::bitflags;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::mem::mapped::Ram;
use crate::virtio::queue::private::VirtQueuePrivate;
use crate::virtio::queue::{DescChain, DescFlag, QueueReg, VirtQueue};
use crate::virtio::{Result, error};

#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    pub addr: u64,
    pub len: u32,
    pub flag: u16,
    pub next: u16,
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        const NO_INTERRUPT = 1;
    }
}

#[repr(C, align(2))]
#[derive(Debug, Clone, Layout, Immutable, FromBytes, IntoBytes)]
pub struct AvailHeader {
    flags: u16,
    idx: u16,
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        const NO_NOTIFY = 1;
    }
}

#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    flags: u16,
    idx: u16,
}

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    id: u32,
    len: u32,
}

#[derive(Debug)]
pub struct SplitQueue<'r, 'm> {
    reg: &'r QueueReg,
    ram: &'m Ram,
    size: u16,
    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    used_event: Option<*mut u16>,
    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    avail_event: Option<*mut u16>,
    used_index: u16,
    desc: *mut Desc,
}

type DescIov = (Vec<(u64, u64)>, Vec<(u64, u64)>);

impl<'m> SplitQueue<'_, 'm> {
    pub fn avail_index(&self) -> u16 {
        unsafe { &*self.avail_hdr }.idx
    }

    pub fn set_used_index(&self, val: u16) {
        unsafe { &mut *self.used_hdr }.idx = val;
    }

    pub fn used_event(&self) -> Option<u16> {
        self.used_event.map(|event| unsafe { *event })
    }

    pub fn set_avail_event(&self, op: impl FnOnce(&mut u16)) -> bool {
        match self.avail_event {
            Some(avail_event) => {
                op(unsafe { &mut *avail_event });
                true
            }
            None => false,
        }
    }

    pub fn set_flag_notification(&self, enabled: bool) {
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    pub fn flag_interrupt_enabled(&self) -> bool {
        unsafe { &*self.avail_hdr }.flags == 0
    }

    fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }

    fn get_indirect(
        &self,
        addr: u64,
        readable: &mut Vec<(u64, u64)>,
        writeable: &mut Vec<(u64, u64)>,
    ) -> Result<()> {
        let mut id = 0;
        loop {
            let desc: Desc = self.ram.read_t(addr + id * size_of::<Desc>() as u64)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            assert!(!flag.contains(DescFlag::INDIRECT));
            if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next as u64;
            } else {
                return Ok(());
            }
        }
    }

    pub fn get_desc_iov(&self, mut id: u16) -> Result<DescIov> {
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                assert_eq!(desc.len & 0xf, 0);
                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        Ok((readable, writeable))
    }
}

impl<'r, 'm> SplitQueue<'r, 'm> {
    pub fn new(
        reg: &'r QueueReg,
        ram: &'m Ram,
        event_idx: bool,
    ) -> Result<Option<SplitQueue<'r, 'm>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if event_idx {
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        let used_index = unsafe { &*used_hdr }.idx;
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            reg,
            ram,
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            used_index,
            desc: ram.get_ptr(desc)?,
        }))
    }
}

impl<'m> VirtQueuePrivate<'m> for SplitQueue<'_, 'm> {
    type Index = u16;

    const INIT_INDEX: u16 = 0;

    fn desc_avail(&self, index: u16) -> bool {
        let avail_index = self.avail_index();
        index < avail_index || index - avail_index >= !(self.size - 1)
    }

    fn get_desc_chain(&self, index: u16) -> Result<Option<DescChain<'m>>> {
        if !self.desc_avail(index) {
            return Ok(None);
        }
        let wrapped_index = index & (self.size - 1);
        let desc_id = unsafe { *self.avail_ring.offset(wrapped_index as isize) };
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Some(DescChain {
            id: desc_id,
            index,
            count: 1,
            readable,
            writable,
        }))
    }

    fn push_used(&mut self, chain: DescChain, len: u32) {
        let used_elem = UsedElem {
            id: chain.id as u32,
            len,
        };
        let wrapped_index = self.used_index & (self.size - 1);
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        fence(Ordering::SeqCst);
        self.used_index = self.used_index.wrapping_add(1);
        self.set_used_index(self.used_index);
    }

    fn enable_notification(&self, enabled: bool) {
        if !self.set_avail_event(|event| {
            let mut avail_index = self.avail_index();
            if enabled {
                loop {
                    *event = avail_index;
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                *event = avail_index.wrapping_sub(1);
            }
        }) {
            self.set_flag_notification(enabled);
        }
    }

    fn interrupt_enabled(&self, _: u16) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == self.used_index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }

    fn next_index(&self, chain: &DescChain) -> u16 {
        chain.index.wrapping_add(1)
    }
}

impl<'m> VirtQueue<'m> for SplitQueue<'_, 'm> {
    fn reg(&self) -> &QueueReg {
        self.reg
    }
}
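The index handling in this file is easier to see in isolation: the avail and used idx fields are free-running u16 counters, and ring slots are addressed with index & (size - 1), which assumes the queue size is a power of two as the virtio split-ring layout requires. The standalone sketch below is not part of alioth; the slot and pending helpers are hypothetical and only illustrate the wrap-around behaviour that desc_avail, get_desc_chain, and push_used rely on.

// Hypothetical helpers mirroring the split-queue index arithmetic (illustration only).
fn slot(index: u16, size: u16) -> usize {
    // Ring slots wrap with a power-of-two mask, independent of u16 overflow.
    debug_assert!(size.is_power_of_two());
    (index & (size - 1)) as usize
}

fn pending(next: u16, avail_idx: u16) -> u16 {
    // How many descriptor chains the driver has published but the device has not yet
    // consumed; a simplified view of the question desc_avail answers, and it stays
    // correct across u16 wrap-around thanks to wrapping_sub.
    avail_idx.wrapping_sub(next)
}

fn main() {
    let size: u16 = 256;
    // The free-running index overflows from 65535 back to 0, but the masked slot
    // keeps cycling through 0..size without a discontinuity.
    for index in [65534u16, 65535, 0, 1] {
        println!("index {index:5} -> slot {:3}", slot(index, size));
    }
    assert_eq!(pending(65535, 1), 2); // two chains published across the wrap
    assert_eq!(pending(42, 42), 0);   // nothing pending when the indices match
}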