google / alioth / build 17186132660 (push via github)

24 Aug 2025 07:45AM UTC coverage: 17.88% (+4.0%) from 13.887%

Commit by Lencerf: test: replace tempdir with tempfile
Signed-off-by: Changyuan Lyu <changyuanl@google.com>

1336 of 7472 relevant lines covered (17.88%)
19.0 hits per line
Source File: /alioth/src/virtio/queue/split.rs (74.56%)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[path = "split_test.rs"]
#[cfg(test)]
mod tests;

use std::marker::PhantomData;
use std::mem::size_of;
use std::sync::atomic::{Ordering, fence};

use alioth_macros::Layout;
use bitflags::bitflags;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::mem::mapped::Ram;
use crate::virtio::queue::{DescChain, DescFlag, QueueReg, VirtQueue};
use crate::virtio::{Result, error};

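// A descriptor table entry of the split virtqueue: a guest-physical buffer
// address, the buffer length in bytes, DescFlag bits, and the index of the
// next descriptor when the NEXT flag is set.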
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
pub struct Desc {
    pub addr: u64,
    pub len: u32,
    pub flag: u16,
    pub next: u16,
}

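// Driver-owned available ring header. The driver may set NO_INTERRUPT in
// `flags` to suppress used-buffer interrupts; `idx` is the free-running
// index of the next slot the driver will fill.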
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct AvailFlag: u16 {
        const NO_INTERRUPT = 1;
    }
}

#[repr(C, align(2))]
#[derive(Debug, Clone, Layout, Immutable, FromBytes, IntoBytes)]
pub struct AvailHeader {
    flags: u16,
    idx: u16,
}

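// Device-owned used ring. NO_NOTIFY in `flags` asks the driver to skip
// notifications; `idx` is the next used slot the device will write. Each
// UsedElem records the head descriptor id of a completed chain and the
// number of bytes the device wrote into it.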
bitflags! {
    #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct UsedFlag: u16 {
        const NO_NOTIFY = 1;
    }
}

#[repr(C, align(4))]
#[derive(Debug, Clone, Layout)]
pub struct UsedHeader {
    flags: u16,
    idx: u16,
}

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct UsedElem {
    id: u32,
    len: u32,
}

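// Host-side view of one split virtqueue. The raw pointers point into the
// guest RAM mapping of the descriptor table, available ring, and used ring;
// the used_event/avail_event pointers are present only when the event-index
// feature is negotiated. The 'm lifetime ties these pointers to the Ram
// mapping they were derived from.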
#[derive(Debug)]
pub struct SplitQueue<'m> {
    size: u16,
    avail_hdr: *mut AvailHeader,
    avail_ring: *mut u16,
    used_event: Option<*mut u16>,
    used_hdr: *mut UsedHeader,
    used_ring: *mut UsedElem,
    avail_event: Option<*mut u16>,
    desc: *mut Desc,
    _phantom: PhantomData<&'m ()>,
}

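// Accessors over the guest-resident rings. The unsafe dereferences rely on
// `new` having translated every ring address through `Ram`.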
impl SplitQueue<'_> {
    pub fn avail_index(&self) -> u16 {
        unsafe { &*self.avail_hdr }.idx
    }

    pub fn set_used_index(&self, val: u16) {
        unsafe { &mut *self.used_hdr }.idx = val;
    }

    pub fn used_event(&self) -> Option<u16> {
        self.used_event.map(|event| unsafe { *event })
    }

    pub fn set_avail_event(&self, op: impl FnOnce(&mut u16)) -> bool {
        match self.avail_event {
            Some(avail_event) => {
                op(unsafe { &mut *avail_event });
                true
            }
            None => false,
        }
    }

    pub fn set_flag_notification(&self, enabled: bool) {
        unsafe { &mut *self.used_hdr }.flags = (!enabled) as _;
    }

    pub fn flag_interrupt_enabled(&self) -> bool {
        unsafe { &*self.avail_hdr }.flags == 0
    }

    fn get_desc(&self, id: u16) -> Result<&Desc> {
        if id < self.size {
            Ok(unsafe { &*self.desc.offset(id as isize) })
        } else {
            error::InvalidDescriptor { id }.fail()
        }
    }
}

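// Builds a SplitQueue from the registers programmed by the driver: ring
// size and guest-physical addresses are loaded from `QueueReg` and
// translated into host pointers via `ram.get_ptr`. The avail_event and
// used_event words, which sit directly after the used and available rings,
// are mapped only when `event_idx` is true. Returns Ok(None) if the queue
// is not enabled.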
impl<'m> SplitQueue<'m> {
    pub fn new(reg: &QueueReg, ram: &'m Ram, event_idx: bool) -> Result<Option<SplitQueue<'m>>> {
        if !reg.enabled.load(Ordering::Acquire) {
            return Ok(None);
        }
        let size = reg.size.load(Ordering::Acquire) as u64;
        let mut avail_event = None;
        let mut used_event = None;
        let used = reg.device.load(Ordering::Acquire);
        let avail = reg.driver.load(Ordering::Acquire);
        if event_idx {
            let avail_event_gpa =
                used + size_of::<UsedHeader>() as u64 + size * size_of::<UsedElem>() as u64;
            avail_event = Some(ram.get_ptr(avail_event_gpa)?);
            let used_event_gpa =
                avail + size_of::<AvailHeader>() as u64 + size * size_of::<u16>() as u64;
            used_event = Some(ram.get_ptr(used_event_gpa)?);
        }
        let used_hdr = ram.get_ptr::<UsedHeader>(used)?;
        let avail_ring_gpa = avail + size_of::<AvailHeader>() as u64;
        let used_ring_gpa = used + size_of::<UsedHeader>() as u64;
        let desc = reg.desc.load(Ordering::Acquire);
        Ok(Some(SplitQueue {
            size: size as u16,
            avail_hdr: ram.get_ptr(avail)?,
            avail_ring: ram.get_ptr(avail_ring_gpa)?,
            used_event,
            used_hdr,
            used_ring: ram.get_ptr(used_ring_gpa)?,
            avail_event,
            desc: ram.get_ptr(desc)?,
            _phantom: PhantomData,
        }))
    }
}

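// Split-ring implementation of the generic VirtQueue interface. `Index` is
// the free-running 16-bit ring index, which wraps naturally on overflow.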
impl<'m> VirtQueue<'m> for SplitQueue<'m> {
    type Index = u16;

    const INIT_INDEX: u16 = 0;

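    // A chain is available at `index` once the driver's published avail
    // index has moved past it; the comparison is arranged to stay correct
    // when the free-running u16 indices wrap around.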
    fn desc_avail(&self, index: u16) -> bool {
        let avail_index = self.avail_index();
        index < avail_index || index - avail_index >= !(self.size - 1)
    }

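    // Walks the descriptor chain published at `index`: the head id comes
    // from the available ring, each descriptor is classified as readable or
    // writable for the device, INDIRECT descriptors are expanded from the
    // table they point to, and the collected guest ranges are translated
    // into host iovecs.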
    fn get_avail(&self, index: Self::Index, ram: &'m Ram) -> Result<Option<DescChain<'m>>> {
        if !self.desc_avail(index) {
            return Ok(None);
        }
        let mut readable = Vec::new();
        let mut writable = Vec::new();
        let wrapped_index = index & (self.size - 1);
        let head_id = unsafe { *self.avail_ring.offset(wrapped_index as isize) };
        let mut id = head_id;
        loop {
            let desc = self.get_desc(id)?;
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                let mut id = 0;
                loop {
                    let addr = desc.addr + id as u64 * size_of::<Desc>() as u64;
                    let desc: Desc = ram.read_t(addr)?;
                    let flag = DescFlag::from_bits_retain(desc.flag);
                    assert!(!flag.contains(DescFlag::INDIRECT));
                    if flag.contains(DescFlag::WRITE) {
                        writable.push((desc.addr, desc.len as u64));
                    } else {
                        readable.push((desc.addr, desc.len as u64));
                    }
                    if flag.contains(DescFlag::NEXT) {
                        id = desc.next;
                    } else {
                        break;
                    }
                }
            } else if flag.contains(DescFlag::WRITE) {
                writable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            if flag.contains(DescFlag::NEXT) {
                id = desc.next;
            } else {
                break;
            }
        }
        let readable = ram.translate_iov(&readable)?;
        let writable = ram.translate_iov_mut(&writable)?;
        Ok(Some(DescChain {
            id: head_id,
            delta: 1,
            readable,
            writable,
        }))
    }

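    // Records a completed chain: write the UsedElem into the used ring,
    // fence so it is visible before the index update, then publish the new
    // used index.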
    fn set_used(&self, index: Self::Index, id: u16, len: u32) {
        let used_elem = UsedElem { id: id as u32, len };
        log::info!("used_elem: {used_elem:x?}");
        let wrapped_index = index & (self.size - 1);
        unsafe { *self.used_ring.offset(wrapped_index as isize) = used_elem };
        fence(Ordering::SeqCst);
        self.set_used_index(index.wrapping_add(1));
    }

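    // Controls driver notifications. With the event-index feature, enabling
    // writes the current avail index into avail_event (re-checking in a
    // loop so a concurrent driver update is not missed) and disabling parks
    // avail_event one slot behind; otherwise the NO_NOTIFY flag in the used
    // ring header is toggled instead.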
    fn enable_notification(&self, enabled: bool) {
        if !self.set_avail_event(|event| {
            let mut avail_index = self.avail_index();
            if enabled {
                loop {
                    *event = avail_index;
                    fence(Ordering::SeqCst);
                    let new_avail_index = self.avail_index();
                    if new_avail_index == avail_index {
                        break;
                    } else {
                        avail_index = new_avail_index;
                    }
                }
            } else {
                *event = avail_index.wrapping_sub(1);
            }
        }) {
            self.set_flag_notification(enabled);
        }
    }

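    // Decides whether to signal the driver: with the event-index feature,
    // an interrupt is due when the driver's used_event equals
    // index.wrapping_sub(1); otherwise the NO_INTERRUPT flag in the
    // available ring header is honored.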
    fn interrupt_enabled(&self, index: Self::Index, _: u16) -> bool {
        match self.used_event() {
            Some(used_event) => used_event == index.wrapping_sub(1),
            None => self.flag_interrupt_enabled(),
        }
    }

    fn index_add(&self, index: Self::Index, _: u16) -> Self::Index {
        index.wrapping_add(1)
    }
}