• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

google / alioth / 17114644709

21 Aug 2025 01:34AM UTC coverage: 10.411%. Remained the same
17114644709

Pull #273

github

web-flow
Merge 13942e21e into 7925c9625
Pull Request #273: feat: virtio packed queue

26 of 57 new or added lines in 9 files covered. (45.61%)

2 existing lines in 2 files now uncovered.

714 of 6858 relevant lines covered (10.41%)

16.1 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

68.09
/alioth/src/virtio/queue/split.rs
1
// Copyright 2024 Google LLC
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14

15
// Unit tests live in the sibling file `split_test.rs`; the `#[path]`
// attribute points the module there, and `#[cfg(test)]` compiles it
// only for test builds.
#[path = "split_test.rs"]
#[cfg(test)]
mod tests;
18

19
use std::mem::size_of;
20
use std::sync::atomic::{Ordering, fence};
21

22
use alioth_macros::Layout;
23
use bitflags::bitflags;
24
use zerocopy::{FromBytes, Immutable, IntoBytes};
25

26
use crate::mem::mapped::Ram;
27
use crate::virtio::queue::{DescChain, QueueReg, VirtQueue};
28
use crate::virtio::{Result, VirtioFeature, error};
29

30
/// One entry of the split-virtqueue descriptor table.
///
/// `#[repr(C, align(16))]` fixes the in-memory layout so the struct can be
/// read from / written to guest RAM directly (via `FromBytes`/`IntoBytes`).
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    // Guest-physical address of the buffer.
    pub addr: u64,
    // Length of the buffer in bytes.
    pub len: u32,
    // Bit set interpreted through `DescFlag`.
    pub flag: u16,
    // Index of the next descriptor in the chain; only meaningful when
    // `DescFlag::NEXT` is set in `flag`.
    pub next: u16,
}
38

39
bitflags! {
    /// Flags carried in [`Desc::flag`].
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct DescFlag: u16 {
        /// The chain continues at the descriptor indexed by [`Desc::next`].
        const NEXT = 1;
        /// The buffer is device-writable; without this bit it is
        /// device-readable (see `get_desc_iov`).
        const WRITE = 2;
        /// [`Desc::addr`] points at a table of indirect descriptors
        /// (walked by `get_indirect`).
        const INDIRECT = 4;
    }
}
47

48
bitflags! {
    /// Flags carried in [`AvailHeader::flags`], written by the driver.
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        /// Driver asks the device not to send interrupts
        /// (checked via `flag_interrupt_enabled`).
        const NO_INTERRUPT = 1;
    }
}
54

55
/// Header of the available (driver) ring, mapped directly over guest RAM.
#[repr(C, align(2))]
#[derive(Debug, Clone, Layout, Immutable, FromBytes, IntoBytes)]
pub struct AvailHeader {
    // `AvailFlag` bits written by the driver.
    flags: u16,
    // Free-running index of the next slot the driver will fill;
    // wraps at u16::MAX, not at the ring size.
    idx: u16,
}
61

62
bitflags! {
    /// Flags carried in [`UsedHeader::flags`], written by the device.
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        /// Device asks the driver not to send notifications
        /// (set via `set_flag_notification`).
        const NO_NOTIFY = 1;
    }
}
68

69
/// Header of the used (device) ring, mapped directly over guest RAM.
#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    // `UsedFlag` bits written by the device.
    flags: u16,
    // Free-running index of the next slot the device will fill;
    // published from `SplitQueue::used_index` by `set_used_index`.
    idx: u16,
}
75

76
/// One entry of the used ring: reports a completed descriptor chain.
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    // Index (head) of the completed descriptor chain.
    id: u32,
    // Number of bytes the device wrote into the chain's writable buffers.
    len: u32,
}
82

83
/// Device-side view of one split virtqueue.
///
/// The raw pointers below are host-virtual addresses into guest RAM,
/// obtained once from `ram.get_ptr` in [`SplitQueue::new`]. They remain
/// valid for the lifetime `'m` of the `Ram` borrow.
#[derive(Debug)]
pub struct SplitQueue<'r, 'm> {
    // MMIO/PCI queue registers this queue was configured from.
    reg: &'r QueueReg,

    // Guest memory; used for indirect-descriptor reads and iov translation.
    ram: &'m Ram,

    // Ring size in descriptors. The index-wrapping in `read_avail` and
    // `push_used` masks with `size - 1`, i.e. it relies on `size` being a
    // power of two (as the split-ring layout requires).
    size: u16,

    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    // Driver-written used-event index; `Some` only when EVENT_IDX was
    // negotiated (see `new`).
    used_event: Option<*mut u16>,

    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    // Device-written avail-event index; `Some` only when EVENT_IDX was
    // negotiated.
    avail_event: Option<*mut u16>,
    // Device-local copy of the next used index; published to the guest by
    // `set_used_index`.
    used_index: u16,

    // Base of the descriptor table.
    desc: *mut Desc,
}
102

103
/// `(readable, writable)` lists of guest-physical `(addr, len)` segments
/// gathered from one descriptor chain by `get_desc_iov`.
type DescIov = (Vec<(u64, u64)>, Vec<(u64, u64)>);
104

105
impl<'m> SplitQueue<'_, 'm> {
    /// Reads the driver's current available index from the avail header.
    pub fn avail_index(&self) -> u16 {
        // SAFETY: `avail_hdr` was obtained from `ram.get_ptr` in `new` and
        // stays valid for the `'m` borrow of `ram`.
        unsafe { &*self.avail_hdr }.idx
    }

    /// Publishes the device-local `used_index` into the used header so the
    /// driver can observe newly completed chains.
    pub fn set_used_index(&self) {
        // SAFETY: `used_hdr` was obtained from `ram.get_ptr` in `new`.
        unsafe { &mut *self.used_hdr }.idx = self.used_index
    }

    /// Reads the driver-written used-event index, if EVENT_IDX was
    /// negotiated (`None` otherwise).
    pub fn used_event(&self) -> Option<u16> {
        // SAFETY: when present, the pointer was obtained from
        // `ram.get_ptr` in `new`.
        self.used_event.map(|event| unsafe { *event })
    }

    /// Writes the avail-event index (EVENT_IDX mode). Returns `None` when
    /// the feature was not negotiated and there is nowhere to write.
    pub fn set_avail_event(&self, index: u16) -> Option<()> {
        match self.avail_event {
            Some(avail_event) => {
                // SAFETY: pointer obtained from `ram.get_ptr` in `new`.
                unsafe { *avail_event = index };
                Some(())
            }
            None => None,
        }
    }

    /// Sets/clears NO_NOTIFY in the used header: `enabled == true` writes 0
    /// (driver should notify), `false` writes 1 (suppress notifications).
    pub fn set_flag_notification(&self, enabled: bool) {
        // SAFETY: `used_hdr` was obtained from `ram.get_ptr` in `new`.
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    /// Returns true when the driver has not set NO_INTERRUPT in the avail
    /// header flags (non-EVENT_IDX interrupt suppression).
    pub fn flag_interrupt_enabled(&self) -> bool {
        // SAFETY: `avail_hdr` was obtained from `ram.get_ptr` in `new`.
        unsafe { &*self.avail_hdr }.flags == 0
    }

    /// Reads the descriptor id stored at `index` in the avail ring.
    /// The mask assumes `size` is a power of two.
    pub fn read_avail(&self, index: u16) -> u16 {
        let wrapped_index = index & (self.size - 1);
        // SAFETY: `wrapped_index < self.size` by the mask, and the ring has
        // `size` entries starting at `avail_ring`.
        unsafe { *self.avail_ring.offset(wrapped_index as isize) }
    }

    /// Bounds-checked access to the descriptor table; a guest-supplied id
    /// outside the table yields `InvalidDescriptor` instead of a wild read.
    pub fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            // SAFETY: `id < self.size`, within the descriptor table.
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }

    /// Walks an indirect descriptor table at guest-physical `addr`,
    /// appending each buffer to `readable` or `writeable`.
    ///
    /// NOTE(review): nested INDIRECT is rejected with an `assert!`, which
    /// panics on malformed guest input rather than returning an error —
    /// confirm this is intended. Also, a table whose NEXT links form a
    /// cycle loops forever here; `next` is not validated against the table
    /// length (reads are still bounds-checked by `ram.read_t`).
    fn get_indirect(
        &self,
        addr: u64,
        readable: &mut Vec<(u64, u64)>,
        writeable: &mut Vec<(u64, u64)>,
    ) -> Result<()> {
        let mut id = 0;
        loop {
            // Indirect descriptors are read out of guest RAM by value, not
            // through the queue's descriptor-table pointer.
            let desc: Desc = self.ram.read_t(addr + id * size_of::<Desc>() as u64)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            assert!(!flag.contains(DescFlag::INDIRECT));
            if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next as u64;
            } else {
                return Ok(());
            }
        }
    }

    /// Collects the chain starting at descriptor `id` into `(readable,
    /// writable)` guest-physical segment lists, following NEXT links and
    /// expanding INDIRECT tables.
    ///
    /// NOTE(review): like `get_indirect`, a cyclic chain would loop
    /// forever; the virtio spec forbids such chains but a buggy/malicious
    /// driver could construct one — TODO confirm this is mitigated.
    pub fn get_desc_iov(&self, mut id: u16) -> Result<DescIov> {
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                // Indirect table length must be a multiple of the 16-byte
                // descriptor size; asserted rather than errored.
                assert_eq!(desc.len & 0xf, 0);
                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        Ok((readable, writeable))
    }

    /// Pops the next available chain, translating its guest-physical
    /// segments into host iovs. Returns `Ok(None)` when the ring is empty
    /// (device has caught up with the driver).
    fn get_next_desc_chain(&self) -> Result<Option<DescChain<'m>>> {
        if self.used_index == self.avail_index() {
            return Ok(None);
        }
        let desc_id = self.read_avail(self.used_index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Some(DescChain {
            id: desc_id,
            readable,
            writable,
        }))
    }
}
211

212
impl<'r, 'm> SplitQueue<'r, 'm> {
    /// Builds a device-side view of the queue described by `reg`.
    ///
    /// Returns `Ok(None)` when the driver has not enabled the queue yet.
    /// All guest-physical ring addresses are resolved to host pointers
    /// once, up front; any unmapped address surfaces as an error here
    /// rather than at access time.
    pub fn new(
        reg: &'r QueueReg,
        ram: &'m Ram,
        feature: u128,
    ) -> Result<Option<SplitQueue<'r, 'm>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let feature = VirtioFeature::from_bits_retain(feature);
        // `device` holds the used ring GPA, `driver` the avail ring GPA.
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if feature.contains(VirtioFeature::EVENT_IDX) {
            // With EVENT_IDX, the avail-event word sits right after the
            // used ring's `size` elements...
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            // ...and the used-event word right after the avail ring's
            // `size` u16 entries.
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        // Resume from whatever used index is already published in guest
        // memory, rather than assuming 0.
        // SAFETY: `used_hdr` was just obtained from `ram.get_ptr`.
        let used_index = unsafe { &*used_hdr }.idx;
        // The rings' element arrays immediately follow their headers.
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            reg,
            ram,
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            used_index,
            desc: ram.get_ptr(desc)?,
        }))
    }
}
255

256
impl<'m> VirtQueue<'m> for SplitQueue<'_, 'm> {
    fn reg(&self) -> &QueueReg {
        self.reg
    }

    fn size(&self) -> u16 {
        self.size
    }

    /// `Option<Result<_>>` form of `get_next_desc_chain`: `None` when the
    /// ring is empty, `Some(Err(_))` on a malformed chain.
    fn next_desc_chain(&self) -> Option<Result<DescChain<'m>>> {
        self.get_next_desc_chain().transpose()
    }

    /// True while the driver's published avail index is ahead of the
    /// device's used index.
    fn has_next_desc(&self) -> bool {
        self.used_index != self.avail_index()
    }

    // Trait method forwards to the inherent accessor of the same name.
    fn avail_index(&self) -> u16 {
        self.avail_index()
    }

    /// Resolves the chain at avail-ring slot `index` without advancing any
    /// queue state (unlike `next_desc_chain`, which consumes a slot).
    fn get_desc_chain(&self, index: u16) -> Result<DescChain<'m>> {
        let desc_id = self.read_avail(index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(DescChain {
            id: desc_id,
            readable,
            writable,
        })
    }

    /// Returns the chain to the driver: writes a `UsedElem`, then (after a
    /// fence so the element is visible first) bumps and publishes the used
    /// index. Returns the ring slot that was used.
    fn push_used(&mut self, chain: DescChain, len: usize) -> u16 {
        let used_index = self.used_index;
        let used_elem = UsedElem {
            id: chain.id as u32,
            len: len as u32,
        };
        // Mask assumes power-of-two ring size (see `read_avail`).
        let wrapped_index = used_index & (self.size - 1);
        // SAFETY: `wrapped_index < self.size`; `used_ring` has `size`
        // entries, pointer obtained from `ram.get_ptr` in `new`.
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        // The element must be globally visible before the index update,
        // or the driver could read a stale element.
        fence(Ordering::SeqCst);
        self.used_index = used_index.wrapping_add(1);
        self.set_used_index();
        used_index
    }

    /// Tells the driver whether the device wants kick notifications,
    /// using whichever suppression mechanism was negotiated.
    fn enable_notification(&self, enabled: bool) {
        if self.avail_event.is_some() {
            // EVENT_IDX mode: notification interest is expressed through
            // the avail-event index.
            let mut avail_index = self.avail_index();
            if enabled {
                // Re-check after publishing: if the driver advanced the
                // avail index concurrently, it may have skipped the
                // notification, so keep the event index caught up.
                loop {
                    self.set_avail_event(avail_index);
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                // Park the event index just behind the current avail
                // index so the driver won't notify.
                self.set_avail_event(avail_index.wrapping_sub(1));
            }
        } else {
            // Legacy mode: toggle NO_NOTIFY in the used header flags.
            self.set_flag_notification(enabled);
        }
    }

    /// Whether the driver currently wants an interrupt: in EVENT_IDX mode
    /// the just-published used index must have crossed the driver's
    /// used-event value; otherwise fall back to the avail-flags check.
    fn interrupt_enabled(&self) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == self.used_index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc