• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

google / alioth / 17185259678

24 Aug 2025 06:24AM UTC coverage: 13.868% (-0.02%) from 13.887%
17185259678

Pull #277

github

web-flow
Merge 2d53e34b0 into 861f19073
Pull Request #277: feat: Unix domain socket based vsock device

62 of 101 new or added lines in 9 files covered. (61.39%)

72 existing lines in 5 files now uncovered.

972 of 7009 relevant lines covered (13.87%)

17.77 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

88.78
/alioth/src/virtio/queue/packed.rs
1
// Copyright 2025 Google LLC
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14

15
#[cfg(test)]
16
#[path = "packed_test.rs"]
17
mod tests;
18

19
use std::marker::PhantomData;
20
use std::sync::atomic::Ordering;
21

22
use bitfield::bitfield;
23
use zerocopy::{FromBytes, Immutable, IntoBytes};
24

25
use crate::c_enum;
26
use crate::mem::mapped::Ram;
27
use crate::virtio::Result;
28
use crate::virtio::queue::{DescChain, DescFlag, QueueReg, VirtQueue};
29

30
/// A packed-virtqueue descriptor as laid out in guest memory
/// (virtio 1.1 `struct pvirtq_desc`: le64 addr, le32 len, le16 id,
/// le16 flags). `#[repr(C, align(16))]` pins the field order and the
/// 16-byte alignment the ring requires.
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
struct Desc {
    // Guest-physical address of the buffer (or of an indirect
    // descriptor table when DescFlag::INDIRECT is set).
    pub addr: u64,
    // Length of the buffer (or of the indirect table) in bytes.
    pub len: u32,
    // Buffer id; echoed back to the driver when the chain is used.
    pub id: u16,
    // Raw DescFlag bits (AVAIL/USED/WRITE/NEXT/INDIRECT).
    pub flag: u16,
}
38

39
bitfield! {
    // A 16-bit packed-ring position: bits 14:0 hold the descriptor
    // offset within the ring, bit 15 holds the wrap counter that flips
    // every time the offset wraps past the ring size (virtio 1.1
    // packed virtqueue).
    #[derive(Copy, Clone, Default, PartialEq, Eq, Hash)]
    pub struct WrappedIndex(u16);
    impl Debug;
    pub u16, offset, set_offset : 14, 0;
    // NOTE(review): "warp" is a typo for "wrap", but the setter name is
    // public API generated by this macro — renaming it would break
    // every caller, so it is documented rather than fixed here.
    pub wrap_counter, set_warp_counter: 15;
}
46

47
impl WrappedIndex {
48
    const INIT: WrappedIndex = WrappedIndex(1 << 15);
49

50
    fn wrapping_add(&self, delta: u16, size: u16) -> WrappedIndex {
19✔
51
        let mut offset = self.offset() + delta;
55✔
52
        let mut wrap_counter = self.wrap_counter();
55✔
53
        if offset >= size {
24✔
54
            offset -= size;
5✔
55
            wrap_counter = !wrap_counter;
5✔
56
        }
57
        let mut r = WrappedIndex(offset);
37✔
58
        r.set_warp_counter(wrap_counter);
55✔
59
        r
19✔
60
    }
61

62
    fn wrapping_sub(&self, delta: u16, size: u16) -> WrappedIndex {
25✔
63
        let mut offset = self.offset();
73✔
64
        let mut wrap_counter = self.wrap_counter();
73✔
65
        if offset >= delta {
39✔
66
            offset -= delta;
14✔
67
        } else {
68
            offset += size - delta;
14✔
69
            wrap_counter = !wrap_counter;
13✔
70
        }
71
        let mut r = WrappedIndex(offset);
49✔
72
        r.set_warp_counter(wrap_counter);
73✔
73
        r
25✔
74
    }
75
}
76

77
c_enum! {
    // Values for the `flag` field of an event-suppression area
    // (virtio 1.1 packed virtqueue RING_EVENT_FLAGS):
    //   ENABLE  - notify/interrupt after every ring update;
    //   DISABLE - never notify/interrupt;
    //   DESC    - notify/interrupt only for the descriptor position
    //             stored in the accompanying `index` field.
    struct EventFlag(u16);
    {
        ENABLE = 0;
        DISABLE = 1;
        DESC = 2;
    }
}
85

86
struct DescEvent {
87
    index: WrappedIndex,
88
    flag: EventFlag,
89
}
90

91
/// Device-side view of a packed virtqueue (virtio 1.1 sec. 2.7) backed
/// by raw pointers into guest RAM; the `'m` lifetime ties those
/// pointers to the `Ram` they were derived from.
#[derive(Debug)]
pub struct PackedQueue<'m> {
    // Number of descriptor slots in the ring.
    size: u16,
    // Base of the descriptor ring in guest memory.
    desc: *mut Desc,
    // Whether EventFlag::DESC-based suppression is honored in
    // interrupt_enabled (set from the event_idx feature bit).
    enable_event_idx: bool,
    // Device event-suppression area (from QueueReg::device); written by
    // enable_notification to tell the driver whether to notify us.
    notification: *mut DescEvent,
    // Driver event-suppression area (from QueueReg::driver); read by
    // interrupt_enabled to decide whether to inject an interrupt.
    interrupt: *mut DescEvent,
    // Carries the `'m` borrow without storing a reference.
    _phantom: PhantomData<&'m ()>,
}
100

101
impl<'m> PackedQueue<'m> {
102
    pub fn new(reg: &QueueReg, ram: &'m Ram, event_idx: bool) -> Result<Option<PackedQueue<'m>>> {
33✔
103
        if !reg.enabled.load(Ordering::Acquire) {
65✔
104
            return Ok(None);
3✔
105
        }
106
        let size = reg.size.load(Ordering::Acquire);
121✔
107
        let desc = reg.desc.load(Ordering::Acquire);
121✔
108
        let notification: *mut DescEvent = ram.get_ptr(reg.device.load(Ordering::Acquire))?;
181✔
109
        Ok(Some(PackedQueue {
1✔
110
            size,
×
111
            desc: ram.get_ptr(desc)?,
1✔
112
            enable_event_idx: event_idx,
30✔
113
            notification,
×
114
            interrupt: ram.get_ptr(reg.driver.load(Ordering::Acquire))?,
1✔
115
            _phantom: PhantomData,
30✔
116
        }))
117
    }
118

119
    fn flag_is_avail(&self, flag: DescFlag, wrap_counter: bool) -> bool {
9✔
120
        flag.contains(DescFlag::AVAIL) == wrap_counter
17✔
121
            && flag.contains(DescFlag::USED) != wrap_counter
13✔
122
    }
123

124
    fn set_flag_used(&self, flag: &mut DescFlag, wrap_counter: bool) {
5✔
125
        if wrap_counter {
9✔
126
            flag.insert(DescFlag::USED | DescFlag::AVAIL);
9✔
127
        } else {
128
            flag.remove(DescFlag::USED | DescFlag::AVAIL);
×
129
        }
130
    }
131
}
132

133
impl<'m> VirtQueue<'m> for PackedQueue<'m> {
    type Index = WrappedIndex;

    const INIT_INDEX: WrappedIndex = WrappedIndex::INIT;

    /// Checks whether the descriptor at `index` has been made available
    /// by the driver, by comparing its AVAIL/USED flag bits against the
    /// wrap counter carried in `index`.
    fn desc_avail(&self, index: WrappedIndex) -> bool {
        self.flag_is_avail(
            // SAFETY-relevant: `index.offset()` is a 15-bit value used to
            // index the ring; assumes it is < self.size — TODO confirm
            // callers only pass in-range indices.
            DescFlag::from_bits_retain(unsafe { &*self.desc.offset(index.offset() as isize) }.flag),
            index.wrap_counter(),
        )
    }

    /// Walks the descriptor chain starting at `index` and collects its
    /// buffers into a `DescChain`.
    ///
    /// Returns `Ok(None)` when no descriptor is available at `index`.
    /// Direct descriptors are classified by their WRITE bit into
    /// device-readable vs device-writable ranges; an INDIRECT
    /// descriptor instead points at a table of `Desc` entries in guest
    /// memory which are all read (via `ram.read_t`) and classified the
    /// same way. Errors from reading the indirect table or translating
    /// the collected (addr, len) ranges are propagated.
    fn get_avail(&self, index: Self::Index, ram: &'m Ram) -> Result<Option<DescChain<'m>>> {
        if !self.desc_avail(index) {
            return Ok(None);
        }
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        // Number of ring slots consumed by this chain.
        let mut delta = 0;
        let mut offset = index.offset();
        // Loop breaks with the buffer id of the chain's last descriptor.
        let id = loop {
            let desc = unsafe { &*self.desc.offset(offset as isize) };
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                // `desc.len` is the byte size of the indirect table;
                // iterate over its `Desc`-sized entries.
                for i in 0..(desc.len as usize / size_of::<Desc>()) {
                    let addr = desc.addr + (i * size_of::<Desc>()) as u64;
                    let desc: Desc = ram.read_t(addr)?;
                    let flag = DescFlag::from_bits_retain(desc.flag);
                    if flag.contains(DescFlag::WRITE) {
                        writeable.push((desc.addr, desc.len as u64));
                    } else {
                        readable.push((desc.addr, desc.len as u64));
                    }
                }
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            delta += 1;
            if !flag.contains(DescFlag::NEXT) {
                // End of chain: this descriptor's id identifies the
                // whole chain when it is marked used.
                break desc.id;
            }
            // Chained descriptors occupy consecutive ring slots,
            // wrapping at the ring size.
            offset = (offset + 1) % self.size;
        };
        Ok(Some(DescChain {
            id,
            delta,
            readable: ram.translate_iov(&readable)?,
            writable: ram.translate_iov_mut(&writeable)?,
        }))
    }

    /// Marks the chain identified by `id` as used, writing the used
    /// element (id + written length) into the ring slot at `index` and
    /// flipping its AVAIL/USED bits to match the wrap counter. In the
    /// packed layout only the chain's first slot is rewritten.
    fn set_used(&self, index: Self::Index, id: u16, len: u32) {
        let first = unsafe { &mut *self.desc.offset(index.offset() as isize) };
        first.id = id;
        first.len = len;
        let mut flag = DescFlag::from_bits_retain(first.flag);
        self.set_flag_used(&mut flag, index.wrap_counter());
        // Write flags last so the id/len are in place when the driver
        // observes the flag change — NOTE(review): this is a plain
        // (non-atomic) store; confirm ordering guarantees elsewhere.
        first.flag = flag.bits();
    }

    /// Tells the driver whether the device wants to be notified, by
    /// writing ENABLE or DISABLE into the device event-suppression
    /// area.
    fn enable_notification(&self, enabled: bool) {
        unsafe {
            (&mut *self.notification).flag = if enabled {
                EventFlag::ENABLE
            } else {
                EventFlag::DISABLE
            };
        }
    }

    /// Decides whether an interrupt should be injected after the device
    /// advanced its used index to `index` by consuming `delta` chains.
    ///
    /// With event-idx enabled and the driver requesting DESC-based
    /// suppression, the driver's requested position must fall inside
    /// the half-open window [previous used index, previous + delta);
    /// the wrap-counter comparison unrolls the ring so a wrapped window
    /// is compared linearly. Otherwise the driver's plain flag decides.
    fn interrupt_enabled(&self, index: Self::Index, delta: u16) -> bool {
        let interrupt = unsafe { &*self.interrupt };
        if self.enable_event_idx && interrupt.flag == EventFlag::DESC {
            let prev_used_index = index.wrapping_sub(delta, self.size);
            let base = prev_used_index.offset();
            let end = base + delta;
            let mut offset = interrupt.index.offset();
            if interrupt.index.wrap_counter() != prev_used_index.wrap_counter() {
                // Requested index is one lap ahead of the window start;
                // shift it past the ring end for a linear comparison.
                offset += self.size;
            }
            base <= offset && offset < end
        } else {
            interrupt.flag == EventFlag::ENABLE
        }
    }

    /// Advances a ring index by `delta` slots, wrapping and toggling
    /// the wrap counter as needed.
    fn index_add(&self, index: Self::Index, delta: u16) -> Self::Index {
        index.wrapping_add(delta, self.size)
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc