google / alioth / build 17114607742

21 Aug 2025 01:31AM UTC. Coverage: 10.411% (remained the same).

Pull Request #273: feat: virtio packed queue
Merge 897d3fdbf into 7925c9625 (committed by web-flow via GitHub)

30 of 57 new or added lines in 9 files covered (52.63%).
50 existing lines in 4 files are now uncovered.
714 of 6858 relevant lines covered (10.41%).
16.1 hits per line.


Source file: /alioth/src/virtio/queue/split.rs (68.09% of lines covered)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[path = "split_test.rs"]
#[cfg(test)]
mod tests;

use std::mem::size_of;
use std::sync::atomic::{Ordering, fence};

use alioth_macros::Layout;
use bitflags::bitflags;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::mem::mapped::Ram;
use crate::virtio::queue::{Descriptor, Queue, VirtQueue};
use crate::virtio::{Result, VirtioFeature, error};

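// One entry in the descriptor table of a split virtqueue (struct
// virtq_desc in the VirtIO spec): the guest-physical address and length
// of one buffer, a flag word, and the index of the next descriptor when
// this entry is part of a chain.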
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    pub addr: u64,
    pub len: u32,
    pub flag: u16,
    pub next: u16,
}

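// Descriptor flags defined by the VirtIO spec: NEXT marks a chained
// descriptor, WRITE marks a buffer the device writes to, and INDIRECT
// marks a buffer that itself contains a table of descriptors.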
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct DescFlag: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        const NO_INTERRUPT = 1;
    }
}

#[repr(C, align(2))]
#[derive(Debug, Clone, Layout, Immutable, FromBytes, IntoBytes)]
pub struct AvailHeader {
    flags: u16,
    idx: u16,
}

bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        const NO_NOTIFY = 1;
    }
}

#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    flags: u16,
    idx: u16,
}

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    id: u32,
    len: u32,
}

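// Runtime view of one split virtqueue. The raw pointers are host
// addresses into guest RAM, resolved once in `new()` from the
// guest-physical addresses in `reg`; `used_event` and `avail_event`
// are only present when VIRTIO_F_EVENT_IDX has been negotiated.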
#[derive(Debug)]
pub struct SplitQueue<'q, 'm> {
    reg: &'q Queue,

    ram: &'m Ram,

    size: u16,

    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    used_event: Option<*mut u16>,

    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    avail_event: Option<*mut u16>,
    used_index: u16,

    desc: *mut Desc,
}

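// (guest address, length) pairs for the readable and the writeable
// buffers of a descriptor chain, in that order.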
type DescIov = (Vec<(u64, u64)>, Vec<(u64, u64)>);

impl<'m> SplitQueue<'_, 'm> {
    pub fn avail_index(&self) -> u16 {
        unsafe { &*self.avail_hdr }.idx
    }

    pub fn set_used_index(&self) {
        unsafe { &mut *self.used_hdr }.idx = self.used_index
    }

    pub fn used_event(&self) -> Option<u16> {
        self.used_event.map(|event| unsafe { *event })
    }

    pub fn set_avail_event(&self, index: u16) -> Option<()> {
        match self.avail_event {
            Some(avail_event) => {
                unsafe { *avail_event = index };
                Some(())
            }
            None => None,
        }
    }

    pub fn set_flag_notification(&self, enabled: bool) {
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    pub fn flag_interrupt_enabled(&self) -> bool {
        unsafe { &*self.avail_hdr }.flags == 0
    }

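    // Split virtqueue sizes are powers of two, so the free-running ring
    // indices can be wrapped with a mask instead of a modulo.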
    pub fn read_avail(&self, index: u16) -> u16 {
        let wrapped_index = index & (self.size - 1);
        unsafe { *self.avail_ring.offset(wrapped_index as isize) }
    }

    pub fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }

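    // Collects the buffers of an indirect descriptor table into
    // `readable` and `writeable`. Indirect tables must not nest, hence
    // the assertion below.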
    fn get_indirect(
        &self,
        addr: u64,
        readable: &mut Vec<(u64, u64)>,
        writeable: &mut Vec<(u64, u64)>,
    ) -> Result<()> {
        let mut id = 0;
        loop {
            let desc: Desc = self.ram.read_t(addr + id * size_of::<Desc>() as u64)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            assert!(!flag.contains(DescFlag::INDIRECT));
            if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next as u64;
            } else {
                return Ok(());
            }
        }
    }

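    // Walks the descriptor chain starting at `id`, gathering readable
    // and writeable buffers and expanding indirect tables along the way.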
    pub fn get_desc_iov(&self, mut id: u16) -> Result<DescIov> {
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                assert_eq!(desc.len & 0xf, 0);
                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        Ok((readable, writeable))
    }

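    // Pops the next available descriptor chain, if any, and translates
    // its guest addresses into host iovecs.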
    fn get_next_desc(&self) -> Result<Option<Descriptor<'m>>> {
        if self.used_index == self.avail_index() {
            return Ok(None);
        }
        let desc_id = self.read_avail(self.used_index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Some(Descriptor {
            id: desc_id,
            readable,
            writable,
        }))
    }
}

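// Constructor. With VIRTIO_F_EVENT_IDX negotiated, `used_event` sits
// right after the available ring and `avail_event` right after the used
// ring, which is what the two GPA computations below locate.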
impl<'q, 'm> SplitQueue<'q, 'm> {
    pub fn new(reg: &'q Queue, ram: &'m Ram, feature: u128) -> Result<Option<SplitQueue<'q, 'm>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let feature = VirtioFeature::from_bits_retain(feature);
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if feature.contains(VirtioFeature::EVENT_IDX) {
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        let used_index = unsafe { &*used_hdr }.idx;
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            reg,
            ram,
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            used_index,
            desc: ram.get_ptr(desc)?,
        }))
    }
}

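// Wires the split layout into the generic `VirtQueue` interface that
// device models drive.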
impl<'m> VirtQueue<'m> for SplitQueue<'_, 'm> {
    fn reg(&self) -> &Queue {
        self.reg
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn next_desc(&self) -> Option<Result<Descriptor<'m>>> {
        self.get_next_desc().transpose()
    }

    fn has_next_desc(&self) -> bool {
        self.used_index != self.avail_index()
    }

    fn avail_index(&self) -> u16 {
        self.avail_index()
    }

    fn get_descriptor(&self, index: u16) -> Result<Descriptor<'m>> {
        let desc_id = self.read_avail(index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Descriptor {
            id: desc_id,
            readable,
            writable,
        })
    }

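    // Returns a descriptor chain to the driver: the used element is
    // written first, and the fence makes it visible before the
    // published used index moves past it.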
    fn push_used(&mut self, desc: Descriptor, len: usize) -> u16 {
        let used_index = self.used_index;
        let used_elem = UsedElem {
            id: desc.id as u32,
            len: len as u32,
        };
        let wrapped_index = used_index & (self.size - 1);
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        fence(Ordering::SeqCst);
        self.used_index = used_index.wrapping_add(1);
        self.set_used_index();
        used_index
    }

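    // With EVENT_IDX, notification interest is expressed by writing
    // `avail_event`; the loop re-reads the available index after the
    // fence to close the race with a driver that is publishing buffers
    // concurrently. Without EVENT_IDX, the NO_NOTIFY flag is toggled
    // instead.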
    fn enable_notification(&self, enabled: bool) {
        if self.avail_event.is_some() {
            let mut avail_index = self.avail_index();
            if enabled {
                loop {
                    self.set_avail_event(avail_index);
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                self.set_avail_event(avail_index.wrapping_sub(1));
            }
        } else {
            self.set_flag_notification(enabled);
        }
    }

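    // With EVENT_IDX, the driver requests an interrupt by setting
    // `used_event` to the index it wants to be told about; otherwise
    // the avail-ring NO_INTERRUPT flag decides.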
    fn interrupt_enabled(&self) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == self.used_index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }
}
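
A minimal sketch of the consuming side, for orientation only: assuming a
`queue` that implements `VirtQueue` and a hypothetical device-specific
`process` function (neither appears in this file), a device model drains
the queue roughly like this:

    // Illustrative sketch, not code from this repository.
    while let Some(result) = queue.next_desc() {
        let desc = result?;            // propagate translation errors
        let written = process(&desc)?; // hypothetical device-specific work
        queue.push_used(desc, written);
    }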