google / alioth / build 17114712255

21 Aug 2025 01:38AM UTC · coverage: 10.46% (+0.05%) from 10.411%

Pull Request #273: feat: virtio packed queue
Merge 46cfac353 into 7925c9625 (committed via github web-flow)

65 of 149 new or added lines in 10 files covered (43.62%).
70 existing lines in 3 files now uncovered.
719 of 6874 relevant lines covered (10.46%).
16.14 hits per line.

Source File: /alioth/src/virtio/queue/split.rs (72.58% covered)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[path = "split_test.rs"]
#[cfg(test)]
mod tests;

use std::mem::size_of;
use std::sync::atomic::{Ordering, fence};

use alioth_macros::Layout;
use bitflags::bitflags;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::mem::mapped::Ram;
use crate::virtio::queue::private::VirtQueuePrivate;
use crate::virtio::queue::{DescChain, QueueReg, VirtQueue};
use crate::virtio::{Result, error};

#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    pub addr: u64,
    pub len: u32,
    pub flag: u16,
    pub next: u16,
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct DescFlag: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        const NO_INTERRUPT = 1;
    }
}

#[repr(C, align(2))]
#[derive(Debug, Clone, Layout, Immutable, FromBytes, IntoBytes)]
pub struct AvailHeader {
    flags: u16,
    idx: u16,
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        const NO_NOTIFY = 1;
    }
}

#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    flags: u16,
    idx: u16,
}

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    id: u32,
    len: u32,
}
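
// Layout sketch (assuming a queue of size 256; sizes are powers of two):
// the structs above mirror the split-virtqueue layout of the VirtIO 1.x
// spec (virtq_desc, virtq_avail, virtq_used), so the three guest-memory
// areas work out to:
//
//   descriptor table: 256 * size_of::<Desc>() = 256 * 16 = 4096 bytes
//   avail area: AvailHeader (4) + 256 * size_of::<u16>() = 516 bytes,
//       followed by a u16 used_event when VIRTIO_RING_F_EVENT_IDX is
//       negotiated
//   used area: UsedHeader (4) + 256 * size_of::<UsedElem>() = 2052 bytes,
//       followed by a u16 avail_event under the same feature
//
// SplitQueue::new below computes exactly these offsets when it resolves
// the event-index pointers.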

#[derive(Debug)]
pub struct SplitQueue<'r, 'm> {
    reg: &'r QueueReg,
    ram: &'m Ram,
    size: u16,
    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    used_event: Option<*mut u16>,
    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    avail_event: Option<*mut u16>,
    used_index: u16,
    desc: *mut Desc,
}

type DescIov = (Vec<(u64, u64)>, Vec<(u64, u64)>);

impl<'m> SplitQueue<'_, 'm> {
    pub fn avail_index(&self) -> u16 {
        unsafe { &*self.avail_hdr }.idx
    }

    pub fn set_used_index(&self, val: u16) {
        unsafe { &mut *self.used_hdr }.idx = val;
    }

    pub fn used_event(&self) -> Option<u16> {
        self.used_event.map(|event| unsafe { *event })
    }

    pub fn set_avail_event(&self, op: impl FnOnce(&mut u16)) -> bool {
        match self.avail_event {
            Some(avail_event) => {
                op(unsafe { &mut *avail_event });
                true
            }
            None => false,
        }
    }

    pub fn set_flag_notification(&self, enabled: bool) {
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    pub fn flag_interrupt_enabled(&self) -> bool {
        unsafe { &*self.avail_hdr }.flags == 0
    }

    fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }

    fn get_indirect(
        &self,
        addr: u64,
        readable: &mut Vec<(u64, u64)>,
        writeable: &mut Vec<(u64, u64)>,
    ) -> Result<()> {
        let mut id = 0;
        loop {
            let desc: Desc = self.ram.read_t(addr + id * size_of::<Desc>() as u64)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            assert!(!flag.contains(DescFlag::INDIRECT));
            if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next as u64;
            } else {
                return Ok(());
            }
        }
    }

    pub fn get_desc_iov(&self, mut id: u16) -> Result<DescIov> {
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                assert_eq!(desc.len & 0xf, 0);
                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        Ok((readable, writeable))
    }
}
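
// Worked example for get_desc_iov: a virtio-blk style request is a chain of
// three descriptors (the guest addresses here are hypothetical):
//
//   Desc { addr: 0x1000, len: 16,   flag: NEXT,         next: 1 }  // header
//   Desc { addr: 0x2000, len: 4096, flag: WRITE | NEXT, next: 2 }  // data
//   Desc { addr: 0x3000, len: 1,    flag: WRITE,        next: 0 }  // status
//
// Walking this chain yields
//
//   readable  = [(0x1000, 16)]
//   writeable = [(0x2000, 4096), (0x3000, 1)]
//
// i.e. device-readable and device-writable buffers are gathered into two
// separate scatter-gather lists, in chain order.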

impl<'r, 'm> SplitQueue<'r, 'm> {
    pub fn new(
        reg: &'r QueueReg,
        ram: &'m Ram,
        event_idx: bool,
    ) -> Result<Option<SplitQueue<'r, 'm>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if event_idx {
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        let used_index = unsafe { &*used_hdr }.idx;
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            reg,
            ram,
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            used_index,
            desc: ram.get_ptr(desc)?,
        }))
    }
}

impl<'m> VirtQueuePrivate<'m> for SplitQueue<'_, 'm> {
    fn desc_avail(&self, index: u16) -> bool {
        let avail_index = self.avail_index();
        index < avail_index || index - avail_index >= !(self.size - 1)
    }
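
    // Wrap-around sketch for desc_avail, assuming size = 256 so that
    // !(size - 1) == 0xFF00: index and avail_index are free-running u16
    // counters, and the slot at `index` is pending iff the driver's
    // avail_index is ahead of it modulo 2^16.
    //
    //   avail_index = 5, index = 3     -> 3 < 5, pending
    //   avail_index = 5, index = 5     -> both checks fail, ring drained
    //   avail_index = 2, index = 65535 -> 65533 >= 0xFF00, pending
    //       (the driver has wrapped and is really 3 slots ahead mod 2^16)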

    fn get_desc_chain(&self, index: u16) -> Result<Option<DescChain<'m>>> {
        if !self.desc_avail(index) {
            return Ok(None);
        }
        let wrapped_index = index & (self.size - 1);
        let desc_id = unsafe { *self.avail_ring.offset(wrapped_index as isize) };
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Some(DescChain {
            id: desc_id,
            index,
            readable,
            writable,
        }))
    }

    fn push_used(&mut self, chain: DescChain, len: u32) {
        let used_elem = UsedElem {
            id: chain.id as u32,
            len,
        };
        let wrapped_index = self.used_index & (self.size - 1);
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        fence(Ordering::SeqCst);
        self.used_index = self.used_index.wrapping_add(1);
        self.set_used_index(self.used_index);
    }
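
    // Ordering note for push_used: the fence guarantees the UsedElem write
    // is visible to the guest before the incremented index is published via
    // set_used_index; a driver that observed the new index before the
    // element landed could otherwise read a stale id/len pair.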

    fn enable_notification(&self, enabled: bool) {
        if !self.set_avail_event(|event| {
            let mut avail_index = self.avail_index();
            if enabled {
                loop {
                    *event = avail_index;
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                *event = avail_index.wrapping_sub(1);
            }
        }) {
            self.set_flag_notification(enabled);
        }
    }

    fn interrupt_enabled(&self) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == self.used_index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }

    fn next_index(&self, chain: &DescChain) -> u16 {
        chain.index.wrapping_add(1)
    }
}

impl<'m> VirtQueue<'m> for SplitQueue<'_, 'm> {
    fn reg(&self) -> &QueueReg {
        self.reg
    }
}
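
// Usage sketch: a hypothetical device-side poll loop driving this queue
// through the crate-internal VirtQueuePrivate trait. `reg`, `ram`, the
// starting index, and the request handling are assumptions; the calls
// themselves are the API above.
//
//   if let Some(mut queue) = SplitQueue::new(&reg, &ram, true)? {
//       let mut index = 0u16; // device's free-running consumption index
//       while let Some(chain) = queue.get_desc_chain(index)? {
//           // ... parse chain.readable, fill chain.writable ...
//           let written = 0u32; // bytes the device wrote, per request
//           index = queue.next_index(&chain);
//           queue.push_used(chain, written);
//       }
//       queue.enable_notification(true); // re-arm driver notifications
//   }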