google / alioth, build 18638570188
20 Oct 2025 12:38AM UTC. Coverage: 20.202% (-0.01%) from 20.213%

Pull Request #308: Add tests for PciSegment
Merge 73a1640e9 into 416357998 (committed by web-flow via GitHub)

0 of 59 new or added lines in 5 files covered (0.0%).
1163 existing lines in 25 files now uncovered.
1578 of 7811 relevant lines covered (20.2%).
19.85 hits per line.

Source File: /alioth/src/virtio/dev/fs/vu.rs (file coverage: 0.0%)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fs::File;
use std::io::ErrorKind;
use std::iter::zip;
use std::mem::size_of_val;
use std::os::fd::{AsFd, AsRawFd};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::mpsc::Receiver;
use std::thread::JoinHandle;

use libc::{MAP_ANONYMOUS, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, MAP_SHARED, PROT_NONE, mmap};
use mio::event::Event;
use mio::unix::SourceFd;
use mio::{Interest, Registry, Token};
use serde::Deserialize;
use serde_aco::Help;
use zerocopy::{FromZeros, IntoBytes};

use crate::errors::BoxTrace;
use crate::fuse::bindings::FuseSetupmappingFlag;
use crate::fuse::{self, DaxRegion};
use crate::hv::IoeventFd;
use crate::mem::mapped::{ArcMemPages, RamBus};
use crate::mem::{LayoutChanged, MemRegion, MemRegionType};
use crate::sync::notifier::Notifier;
use crate::virtio::dev::fs::{FsConfig, FsFeature};
use crate::virtio::dev::{DevParam, Virtio, WakeEvent};
use crate::virtio::queue::{QueueReg, VirtQueue};
use crate::virtio::vu::bindings::{DeviceConfig, FsMap, VuBackMsg, VuFeature};
use crate::virtio::vu::conn::VuChannel;
use crate::virtio::vu::frontend::VuFrontend;
use crate::virtio::vu::{Error, error as vu_error};
use crate::virtio::worker::mio::{ActiveMio, Mio, VirtioMio};
use crate::virtio::{DeviceId, IrqSender, Result};
use crate::{align_up, ffi};

/// A virtio-fs device frontend backed by an external vhost-user backend
/// (e.g. virtiofsd), optionally exposing a DAX window to the guest.
#[derive(Debug)]
pub struct VuFs {
    frontend: VuFrontend,
    config: Arc<FsConfig>,
    dax_region: Option<ArcMemPages>,
}

impl VuFs {
    pub fn new(param: VuFsParam, name: impl Into<Arc<str>>) -> Result<Self> {
        let mut extra_features = VuFeature::empty();
        // A DAX window needs the backend-initiated request channel, with
        // fd passing, for map/unmap requests.
        if param.dax_window > 0 {
            extra_features |= VuFeature::BACKEND_REQ | VuFeature::BACKEND_SEND_FD
        };
        // Without a user-supplied tag, the config space (which carries the
        // tag) must be fetched from the backend.
        if param.tag.is_none() {
            extra_features |= VuFeature::CONFIG;
        }
        let frontend = VuFrontend::new(name, &param.socket, DeviceId::FileSystem, extra_features)?;
        let config = if let Some(tag) = param.tag {
            // The virtio-fs config tag field is 36 bytes; an empty tag is
            // invalid.
            assert!(tag.len() <= 36);
            assert_ne!(tag.len(), 0);
            let mut config = FsConfig::new_zeroed();
            config.tag[0..tag.len()].copy_from_slice(tag.as_bytes());
            // All queues except the high-priority queue (and, if
            // negotiated, the notification queue) are request queues.
            config.num_request_queues = frontend.num_queues() as u32 - 1;
            if FsFeature::from_bits_retain(frontend.feature()).contains(FsFeature::NOTIFICATION) {
                config.num_request_queues -= 1;
            }
            config
        } else {
            let cfg = DeviceConfig {
                offset: 0,
                size: size_of::<FsConfig>() as u32,
                flags: 0,
            };
            let mut config = FsConfig::new_zeroed();
            frontend.session().get_config(&cfg, config.as_mut_bytes())?;
            log::info!("{}: get config: {config:?}", frontend.name());
            config
        };

        // Reserve a page-aligned, initially inaccessible region for the
        // DAX window; file ranges are mapped into it on demand.
        let dax_region = if param.dax_window > 0 {
            let size = align_up!(param.dax_window, 12);
            Some(ArcMemPages::from_anonymous(size, Some(PROT_NONE), None)?)
        } else {
            None
        };

        Ok(VuFs {
            frontend,
            config: Arc::new(config),
            dax_region,
        })
    }
}
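
// Protocol-feature note: `BACKEND_REQ`, `BACKEND_SEND_FD`, and `CONFIG`
// correspond to vhost-user protocol features. BACKEND_REQ establishes a
// backend-to-frontend request channel (needed for DAX map/unmap
// requests), BACKEND_SEND_FD allows file descriptors to accompany those
// requests, and CONFIG lets the frontend read the device config space
// when no mount tag is supplied locally.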

#[derive(Debug, Clone, Deserialize, Help)]
pub struct VuFsParam {
    /// Path to the vhost-user UNIX domain socket.
    pub socket: PathBuf,
    /// Mount tag seen by the guest.
    pub tag: Option<String>,
    /// Size of memory region for DAX in bytes.
    /// 0 means no DAX. [default: 0]
    #[serde(default)]
    pub dax_window: usize,
}

impl DevParam for VuFsParam {
    type Device = VuFs;

    fn build(self, name: impl Into<Arc<str>>) -> Result<Self::Device> {
        VuFs::new(self, name)
    }

    /// The vhost-user backend accesses guest memory directly, so guest
    /// RAM must be backed by a shareable file descriptor.
    fn needs_mem_shared_fd(&self) -> bool {
        true
    }
}
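
// --- Illustrative sketch (not part of the original file) ---
// Building a `VuFs` from its parameters via `DevParam::build`. The socket
// path, mount tag, and device name are hypothetical, and `?` assumes a
// surrounding function returning `Result`.
//
// let param = VuFsParam {
//     socket: PathBuf::from("/run/virtiofsd.sock"),
//     tag: Some("shared".to_string()),
//     dax_window: 1 << 30, // 1 GiB window; align_up! rounds to 4 KiB pages
// };
// let fs: VuFs = param.build("vu-fs-0")?;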

impl Virtio for VuFs {
    type Config = FsConfig;
    type Feature = FsFeature;

    fn id(&self) -> DeviceId {
        DeviceId::FileSystem
    }

    fn name(&self) -> &str {
        self.frontend.name()
    }

    fn config(&self) -> Arc<Self::Config> {
        self.config.clone()
    }

    fn feature(&self) -> u128 {
        self.frontend.feature()
    }

    fn num_queues(&self) -> u16 {
        self.frontend.num_queues()
    }

    fn spawn_worker<S, E>(
        self,
        event_rx: Receiver<WakeEvent<S, E>>,
        memory: Arc<RamBus>,
        queue_regs: Arc<[QueueReg]>,
    ) -> Result<(JoinHandle<()>, Arc<Notifier>)>
    where
        S: IrqSender,
        E: IoeventFd,
    {
        Mio::spawn_worker(self, event_rx, memory, queue_regs)
    }

    fn ioeventfd_offloaded(&self, q_index: u16) -> Result<bool> {
        self.frontend.ioeventfd_offloaded(q_index)
    }

    /// Exposes the DAX window, if any, as a device memory region of type
    /// `Hidden`.
    fn shared_mem_regions(&self) -> Option<Arc<MemRegion>> {
        let dax_region = self.dax_region.as_ref()?;
        Some(Arc::new(MemRegion::with_dev_mem(
            dax_region.clone(),
            MemRegionType::Hidden,
        )))
    }

    fn mem_change_callback(&self) -> Option<Box<dyn LayoutChanged>> {
        self.frontend.mem_change_callback()
    }
}

impl VirtioMio for VuFs {
    fn activate<'m, Q, S, E>(
        &mut self,
        feature: u128,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        self.frontend.activate(feature, active_mio)?;
        // Register the backend request channel with the poller, using the
        // first token past the queue tokens (queues occupy 0..num_queues).
        if let Some(channel) = self.frontend.channel() {
            channel.conn.set_nonblocking(true)?;
            active_mio.poll.registry().register(
                &mut SourceFd(&channel.conn.as_raw_fd()),
                Token(self.frontend.num_queues() as _),
                Interest::READABLE,
            )?;
        }
        Ok(())
    }

    fn handle_event<'a, 'm, Q, S, E>(
        &mut self,
        event: &Event,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        // Tokens below num_queues belong to virtqueues, whose
        // notifications are expected to be offloaded to the backend;
        // seeing one here is an error.
        let q_index = event.token().0;
        if q_index < active_mio.queues.len() {
            return vu_error::QueueErr {
                index: q_index as u16,
            }
            .fail()?;
        }

        // Backend requests are only expected when a DAX window (and thus
        // BACKEND_REQ) was negotiated.
        let Some(dax_region) = &self.dax_region else {
            return vu_error::ProtocolFeature {
                feature: VuFeature::BACKEND_REQ,
            }
            .fail()?;
        };
        let Some(channel) = self.frontend.channel() else {
            return vu_error::ProtocolFeature {
                feature: VuFeature::BACKEND_REQ,
            }
            .fail()?;
        };
        // Drain all pending backend requests; the channel is non-blocking.
        loop {
            let mut fds = [const { None }; 8];
            let msg = channel.recv_msg(&mut fds);
            let (request, size) = match msg {
                Ok(m) => (m.request, m.size),
                Err(Error::System { error, .. }) if error.kind() == ErrorKind::WouldBlock => break,
                Err(e) => return Err(e)?,
            };
            let fs_map: FsMap = channel.recv_payload()?;

            if size as usize != size_of_val(&fs_map) {
                return vu_error::PayloadSize {
                    want: size_of_val(&fs_map),
                    got: size,
                }
                .fail()?;
            }
            match VuBackMsg::from(request) {
                VuBackMsg::SHARED_OBJECT_ADD => {
                    // Map each passed fd into the DAX window at the
                    // requested cache offset.
                    for (index, fd) in fds.iter().enumerate() {
                        let Some(fd) = fd else {
                            break;
                        };
                        let raw_fd = fd.as_raw_fd();
                        let map_addr = dax_region.addr() + fs_map.cache_offset[index] as usize;
                        log::trace!(
                            "{}: mapping fd {raw_fd} to offset {:#x}",
                            self.name(),
                            fs_map.cache_offset[index]
                        );
                        ffi!(
                            unsafe {
                                mmap(
                                    map_addr as _,
                                    fs_map.len[index] as _,
                                    fs_map.flags[index] as _,
                                    MAP_SHARED | MAP_FIXED,
                                    raw_fd,
                                    fs_map.fd_offset[index] as _,
                                )
                            },
                            MAP_FAILED
                        )?;
                    }
                }
                VuBackMsg::SHARED_OBJECT_REMOVE => {
                    // Replace the mapped ranges with inaccessible
                    // anonymous pages rather than leaving a hole.
                    for (len, offset) in zip(fs_map.len, fs_map.cache_offset) {
                        if len == 0 {
                            continue;
                        }
                        log::trace!(
                            "{}: unmapping offset {offset:#x}, size {len:#x}",
                            self.name()
                        );
                        let map_addr = dax_region.addr() + offset as usize;
                        let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED;
                        ffi!(
                            unsafe { mmap(map_addr as _, len as _, PROT_NONE, flags, -1, 0) },
                            MAP_FAILED
                        )?;
                    }
                }
                _ => unimplemented!("{}: unknown request {request:#x}", self.name()),
            }
            channel.reply(VuBackMsg::from(request), &0u64, &[])?;
        }
        Ok(())
    }
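
    // The loop above services the frontend half of the DAX window
    // protocol: each backend request carries an FsMap payload (and, for
    // SHARED_OBJECT_ADD, file descriptors) naming offsets within the
    // window, the frontend applies it with MAP_FIXED mmap calls, and a
    // zero reply acknowledges the request.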

    fn handle_queue<'m, Q, S, E>(
        &mut self,
        index: u16,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        self.frontend.handle_queue(index, active_mio)
    }

    fn reset(&mut self, registry: &Registry) {
        self.frontend.reset(registry)
    }
}

/// DAX mapping requests forwarded over a vhost-user channel.
#[derive(Debug)]
pub struct VuDaxRegion {
    pub channel: Arc<VuChannel>,
}

impl DaxRegion for VuDaxRegion {
    fn map(
        &self,
        m_offset: u64,
        fd: &File,
        f_offset: u64,
        len: u64,
        flag: FuseSetupmappingFlag,
    ) -> fuse::Result<()> {
        let mut fs_map = FsMap::new_zeroed();
        fs_map.fd_offset[0] = f_offset;
        fs_map.cache_offset[0] = m_offset;

        // Translate FUSE setup-mapping flags into mmap protection bits.
        let mut prot = 0;
        if flag.contains(FuseSetupmappingFlag::READ) {
            prot |= libc::PROT_READ;
        };
        if flag.contains(FuseSetupmappingFlag::WRITE) {
            prot |= libc::PROT_WRITE;
        }
        fs_map.flags[0] = prot as _;

        fs_map.len[0] = len;
        let fds = [fd.as_fd()];
        self.channel
            .fs_map(&fs_map, &fds)
            .box_trace(fuse::error::DaxMapping)
    }

    fn unmap(&self, m_offset: u64, len: u64) -> fuse::Result<()> {
        let mut fs_map = FsMap::new_zeroed();
        fs_map.cache_offset[0] = m_offset;
        fs_map.len[0] = len;
        self.channel
            .fs_unmap(&fs_map)
            .box_trace(fuse::error::DaxMapping)
    }
}
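
// --- Illustrative sketch (not part of the original file) ---
// How a FUSE server holding a `VuChannel` might drive `VuDaxRegion` to
// map one read-only page of a file at the start of the DAX window and
// unmap it again. The file path is hypothetical, and `?` assumes a
// surrounding function returning a compatible `Result`.
//
// let region = VuDaxRegion { channel: channel.clone() };
// let file = File::open("/srv/share/data.bin")?;
// region.map(0, &file, 0, 4096, FuseSetupmappingFlag::READ)?;
// region.unmap(0, 4096)?;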