google / alioth / 16866018918

10 Aug 2025 08:28PM UTC coverage: 7.185% (+0.04%) from 7.146%

Pull Request #267: Add unit tests for mod virtio
Merge 58df90b46 into d49daec05

19 of 83 new or added lines in 11 files covered. (22.89%)

43 existing lines in 1 file now uncovered.

486 of 6764 relevant lines covered (7.19%)

14.89 hits per line

Source File: /alioth/src/virtio/queue/split.rs (0.0% of lines covered)

// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::mem::size_of;
use std::sync::atomic::{Ordering, fence};

use alioth_macros::Layout;
use bitflags::bitflags;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::mem::mapped::Ram;
use crate::virtio::queue::{Descriptor, Queue, VirtQueue};
use crate::virtio::{Result, VirtioFeature, error};

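/// A descriptor table entry of a split virtqueue: a guest-physical buffer
/// address and length, the descriptor flags, and the index of the next
/// descriptor in the chain (valid when DescFlag::NEXT is set).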
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    pub addr: u64,
    pub len: u32,
    pub flag: u16,
    pub next: u16,
}

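/// Flags of a descriptor: NEXT chains to another descriptor, WRITE marks
/// the buffer as device-writable, INDIRECT points to an indirect
/// descriptor table.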
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct DescFlag: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

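/// Flags in the available ring header: with NO_INTERRUPT the driver tells
/// the device that it does not need an interrupt after a buffer is used.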
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        const NO_INTERRUPT = 1;
    }
}

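/// Header of the available (driver-owned) ring: the driver's flags and the
/// index of the next free slot in the ring.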
#[repr(C, align(2))]
#[derive(Debug, Clone, Layout)]
pub struct AvailHeader {
    flags: u16,
    idx: u16,
}

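/// Flags in the used ring header: with NO_NOTIFY the device tells the
/// driver that notifications are not needed.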
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        const NO_NOTIFY = 1;
    }
}

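/// Header of the used (device-owned) ring: the device's flags and the
/// index of the next free slot in the ring.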
#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    flags: u16,
    idx: u16,
}

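/// An element of the used ring: the head id of a completed descriptor
/// chain and the number of bytes written to its device-writable buffers.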
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    id: u32,
    len: u32,
}

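/// A split virtqueue as defined by the virtio specification. The raw
/// pointers point into guest RAM mapped by `ram`; `used_event` and
/// `avail_event` are only present when VirtioFeature::EVENT_IDX has been
/// negotiated.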
#[derive(Debug)]
pub struct SplitQueue<'q, 'm> {
    reg: &'q Queue,

    ram: &'m Ram,

    size: u16,

    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    used_event: Option<*mut u16>,

    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    avail_event: Option<*mut u16>,
    used_index: u16,

    desc: *mut Desc,
}

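/// Guest-physical (address, length) pairs of the readable and writable
/// segments of a descriptor chain.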
type DescIov = (Vec<(u64, u64)>, Vec<(u64, u64)>);

impl<'m> SplitQueue<'_, 'm> {
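    // The accessors below dereference raw pointers into guest RAM that were
    // obtained from `Ram::get_ptr` when the queue was built in
    // `SplitQueue::new`.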
    pub fn avail_index(&self) -> u16 {
        unsafe { &*self.avail_hdr }.idx
    }

    pub fn set_used_index(&self) {
        unsafe { &mut *self.used_hdr }.idx = self.used_index
    }

    pub fn used_event(&self) -> Option<u16> {
        self.used_event.map(|event| unsafe { *event })
    }

    pub fn set_avail_event(&self, index: u16) -> Option<()> {
        match self.avail_event {
            Some(avail_event) => {
                unsafe { *avail_event = index };
                Some(())
            }
            None => None,
        }
    }

    pub fn set_flag_notification(&self, enabled: bool) {
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    pub fn flag_interrupt_enabled(&self) -> bool {
        unsafe { &*self.avail_hdr }.flags == 0
    }

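    /// Reads the descriptor head id at `index` in the available ring,
    /// wrapping `index` to the queue size (a power of two).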
    pub fn read_avail(&self, index: u16) -> u16 {
        let wrapped_index = index & (self.size - 1);
        unsafe { *self.avail_ring.offset(wrapped_index as isize) }
    }

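    /// Returns a reference to descriptor table entry `id`, or an
    /// InvalidDescriptor error when `id` is out of bounds.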
    pub fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }

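    /// Walks the indirect descriptor table at guest address `addr`, pushing
    /// each buffer's (address, length) into `readable` or `writeable`.
    /// Indirect descriptors must not nest, hence the assertion.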
    fn get_indirect(
        &self,
        addr: u64,
        readable: &mut Vec<(u64, u64)>,
        writeable: &mut Vec<(u64, u64)>,
    ) -> Result<()> {
        let mut id = 0;
        loop {
            let desc: Desc = self.ram.read_t(addr + id * size_of::<Desc>() as u64)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            assert!(!flag.contains(DescFlag::INDIRECT));
            if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next as u64;
            } else {
                return Ok(());
            }
        }
    }

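    /// Collects the readable and writable guest-physical segments of the
    /// descriptor chain headed by `id`, following an indirect table when
    /// the INDIRECT flag is set.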
    pub fn get_desc_iov(&self, mut id: u16) -> Result<DescIov> {
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                assert_eq!(desc.len & 0xf, 0);
                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        Ok((readable, writeable))
    }

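    /// Takes the next available descriptor chain and translates its
    /// segments into host iovecs, or returns None when the used index has
    /// caught up with the available index (queue empty).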
    fn get_next_desc(&self) -> Result<Option<Descriptor<'m>>> {
        if self.used_index == self.avail_index() {
            return Ok(None);
        }
        let desc_id = self.read_avail(self.used_index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Some(Descriptor {
            id: desc_id,
            readable,
            writable,
        }))
    }
}

impl<'q, 'm> SplitQueue<'q, 'm> {
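    /// Builds a SplitQueue from the queue registers, resolving the
    /// descriptor table, the available ring, and the used ring to host
    /// pointers. Returns None if the queue is not enabled. With
    /// VirtioFeature::EVENT_IDX, also resolves `used_event` (placed after
    /// the available ring) and `avail_event` (placed after the used ring).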
    pub fn new(reg: &'q Queue, ram: &'m Ram, feature: u64) -> Result<Option<SplitQueue<'q, 'm>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let feature = VirtioFeature::from_bits_retain(feature);
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if feature.contains(VirtioFeature::EVENT_IDX) {
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        let used_index = unsafe { &*used_hdr }.idx;
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            reg,
            ram,
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            used_index,
            desc: ram.get_ptr(desc)?,
        }))
    }
}

impl<'m> VirtQueue<'m> for SplitQueue<'_, 'm> {
    fn reg(&self) -> &Queue {
        self.reg
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn next_desc(&self) -> Option<Result<Descriptor<'m>>> {
        self.get_next_desc().transpose()
    }

    fn has_next_desc(&self) -> bool {
        self.used_index != self.avail_index()
    }

    fn avail_index(&self) -> u16 {
        self.avail_index()
    }

    fn get_descriptor(&self, index: u16) -> Result<Descriptor<'m>> {
        let desc_id = self.read_avail(index);
        let (readable, writable) = self.get_desc_iov(desc_id)?;
        let readable = self.ram.translate_iov(&readable)?;
        let writable = self.ram.translate_iov_mut(&writable)?;
        Ok(Descriptor {
            id: desc_id,
            readable,
            writable,
        })
    }

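    /// Publishes a completed descriptor: writes a UsedElem for `desc` with
    /// `len` bytes written, fences, then advances and stores the used
    /// index. Returns the ring index at which the element was published.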
    fn push_used(&mut self, desc: Descriptor, len: usize) -> u16 {
        let used_index = self.used_index;
        let used_elem = UsedElem {
            id: desc.id as u32,
            len: len as u32,
        };
        let wrapped_index = used_index & (self.size - 1);
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        fence(Ordering::SeqCst);
        self.used_index = used_index.wrapping_add(1);
        self.set_used_index();
        used_index
    }

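    /// Tells the driver whether to send notifications. With EVENT_IDX the
    /// device writes `avail_event`, re-reading the available index until it
    /// is stable so that a concurrent driver update is not missed; without
    /// it, the device toggles the NO_NOTIFY flag.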
    fn enable_notification(&self, enabled: bool) {
        if self.avail_event.is_some() {
            let mut avail_index = self.avail_index();
            if enabled {
                loop {
                    self.set_avail_event(avail_index);
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                self.set_avail_event(avail_index.wrapping_sub(1));
            }
        } else {
            self.set_flag_notification(enabled);
        }
    }

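    /// Whether the driver currently wants an interrupt: under EVENT_IDX,
    /// checks `used_event` against the last published used index; otherwise
    /// checks that the driver has not set AvailFlag::NO_INTERRUPT.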
    fn interrupt_enabled(&self) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == self.used_index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }
}
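
For orientation, below is a minimal sketch of how a device model might drain this queue through the VirtQueue trait. The drain_queue helper and its processing step are hypothetical and not part of split.rs; only next_desc, push_used, enable_notification, and has_next_desc come from the file above.

// Hypothetical helper: drains all available descriptor chains, reporting
// zero bytes written for each. A real device would fill desc.writable and
// report the actual count.
fn drain_queue<'m>(queue: &mut impl VirtQueue<'m>) -> Result<()> {
    loop {
        // Suppress driver notifications while actively polling.
        queue.enable_notification(false);
        while let Some(desc) = queue.next_desc() {
            let desc = desc?;
            // ... process desc.readable / desc.writable here ...
            let written = 0;
            queue.push_used(desc, written);
        }
        // Re-enable notifications, then re-check for chains that raced in
        // before the event index or flag update became visible.
        queue.enable_notification(true);
        if !queue.has_next_desc() {
            return Ok(());
        }
    }
}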