
vigna / webgraph-rs / build 20017908129 (github, push)

08 Dec 2025 05:36AM UTC coverage: 62.065% (+0.4%) from 61.641%
zommiommy: "Fix doctests"
5435 of 8757 relevant lines covered (62.06%), 46674689.93 hits per line

Source file: /webgraph/src/utils/mod.rs (44.55% covered)

/*
 * SPDX-FileCopyrightText: 2023 Inria
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
 *
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
 */

//! Miscellaneous utilities.

use rand::Rng;
use std::path::PathBuf;

/// Creates a new directory with a random name inside the given folder.
pub fn temp_dir<P: AsRef<std::path::Path>>(base: P) -> anyhow::Result<PathBuf> {
    let mut base = base.as_ref().to_owned();
    const ALPHABET: &[u8] = b"0123456789abcdef";
    let mut rnd = rand::rng();
    let mut random_str = String::new();
    loop {
        random_str.clear();
        for _ in 0..16 {
            let idx = rnd.random_range(0..ALPHABET.len());
            random_str.push(ALPHABET[idx] as char);
        }
        base.push(&random_str);

        if !base.exists() {
            std::fs::create_dir(&base)?;
            return Ok(base);
        }
        base.pop();
    }
}
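
// Illustrative usage sketch (added for this write-up, not part of the original
// file): create a scratch directory under the system temporary directory, write
// into it, and clean up. Using `std::env::temp_dir()` as the base is an assumption.
//
//     fn scratch() -> anyhow::Result<()> {
//         let dir = temp_dir(std::env::temp_dir())?;
//         assert!(dir.is_dir());
//         std::fs::write(dir.join("data.bin"), b"payload")?;
//         std::fs::remove_dir_all(&dir)?;
//         Ok(())
//     }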

mod batch_codec;
pub use batch_codec::*;

mod circular_buffer;
pub(crate) use circular_buffer::*;

mod mmap_helper;
pub use mmap_helper::*;

mod java_perm;
pub use java_perm::*;

mod granularity;
pub use granularity::*;

pub mod matrix;
pub use matrix::Matrix;

pub mod sort_pairs;
pub use sort_pairs::SortPairs;

pub mod par_sort_pairs;
pub use par_sort_pairs::ParSortPairs;

pub mod par_sort_iters;
pub use par_sort_iters::ParSortIters;

use crate::graphs::{
    arc_list_graph::Iter,
    bvgraph::{Decode, Encode},
};

/// A decoder that re-encodes each value it reads using the given encoder.
/// This is commonly used to change the codes of a graph without fully
/// decompressing and recompressing it, by transcoding each value as it is read.
pub struct Converter<D: Decode, E: Encode> {
    pub decoder: D,
    pub encoder: E,
    pub offset: usize,
}

impl<D: Decode, E: Encode> Decode for Converter<D, E> {
    // TODO: implement correctly start_node/end_node
    #[inline(always)]
    fn read_outdegree(&mut self) -> u64 {
        let res = self.decoder.read_outdegree();
        self.offset += self.encoder.write_outdegree(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_reference_offset(&mut self) -> u64 {
        let res = self.decoder.read_reference_offset();
        self.offset += self.encoder.write_reference_offset(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_block_count(&mut self) -> u64 {
        let res = self.decoder.read_block_count();
        self.offset += self.encoder.write_block_count(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_block(&mut self) -> u64 {
        let res = self.decoder.read_block();
        self.offset += self.encoder.write_block(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_interval_count(&mut self) -> u64 {
        let res = self.decoder.read_interval_count();
        self.offset += self.encoder.write_interval_count(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_interval_start(&mut self) -> u64 {
        let res = self.decoder.read_interval_start();
        self.offset += self.encoder.write_interval_start(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_interval_len(&mut self) -> u64 {
        let res = self.decoder.read_interval_len();
        self.offset += self.encoder.write_interval_len(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_first_residual(&mut self) -> u64 {
        let res = self.decoder.read_first_residual();
        self.offset += self.encoder.write_first_residual(res).unwrap();
        res
    }
    #[inline(always)]
    fn read_residual(&mut self) -> u64 {
        let res = self.decoder.read_residual();
        self.offset += self.encoder.write_residual(res).unwrap();
        res
    }
    #[inline(always)]
    fn num_of_residuals(&mut self, num_of_residuals: usize) {
        self.encoder.num_of_residuals(num_of_residuals);
    }
}
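
// Illustrative sketch (added for this write-up, not part of the original file):
// wiring a `Converter` by hand. Any concrete `Decode`/`Encode` pair can be used;
// `dec` and `enc` stand for a decoder and an encoder obtained elsewhere.
//
//     fn recode<D: Decode, E: Encode>(dec: D, enc: E) -> Converter<D, E> {
//         // Every `read_*` call forwards to `dec`, immediately re-writes the
//         // value with `enc`, and accumulates the amount written (as returned
//         // by the encoder) in `offset`.
//         Converter { decoder: dec, encoder: enc, offset: 0 }
//     }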

/// An enum expressing the memory requirements for batched algorithms
/// such as [`SortPairs`], [`ParSortPairs`], and [`ParSortIters`].
///
/// This type implements [`Mul`](core::ops::Mul) and [`Div`](core::ops::Div) to
/// scale the memory usage requirements by a given factor, independently of the
/// variant.
#[derive(Clone, Copy, Debug)]
pub enum MemoryUsage {
    /// The target overall memory usage in bytes.
    ///
    /// This is the more user-friendly option. The actual number of elements
    /// can be computed using [`batch_size`](MemoryUsage::batch_size).
    MemorySize(usize),
    /// The number of elements used in all batches.
    ///
    /// This is a more low-level option that gives more control to the user, but
    /// the actual memory usage will depend on the size of labels (if any).
    BatchSize(usize),
}

/// Default implementation, returning half of the physical RAM.
impl Default for MemoryUsage {
    fn default() -> Self {
        Self::from_perc(50.0)
    }
}

impl core::fmt::Display for MemoryUsage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MemoryUsage::MemorySize(size) => write!(f, "{} bytes", size),
            MemoryUsage::BatchSize(size) => write!(f, "{} elements", size),
        }
    }
}

impl core::ops::Mul<usize> for MemoryUsage {
    type Output = MemoryUsage;

    fn mul(self, rhs: usize) -> Self::Output {
        match self {
            MemoryUsage::MemorySize(size) => MemoryUsage::MemorySize(size * rhs),
            MemoryUsage::BatchSize(size) => MemoryUsage::BatchSize(size * rhs),
        }
    }
}

impl core::ops::Div<usize> for MemoryUsage {
    type Output = MemoryUsage;

    fn div(self, rhs: usize) -> Self::Output {
        match self {
            MemoryUsage::MemorySize(size) => MemoryUsage::MemorySize(size / rhs),
            MemoryUsage::BatchSize(size) => MemoryUsage::BatchSize(size / rhs),
        }
    }
}

impl MemoryUsage {
    /// Creates a new memory usage expressed as a percentage of the
    /// physical RAM.
    pub fn from_perc(perc: f64) -> Self {
        let system = sysinfo::System::new_with_specifics(
            sysinfo::RefreshKind::nothing()
                .with_memory(sysinfo::MemoryRefreshKind::nothing().with_ram()),
        );
        MemoryUsage::MemorySize(
            usize::try_from((system.total_memory() as f64 * perc / 100.0) as u64)
                .expect("System memory overflows usize"),
        )
    }

    /// Returns the batch size for elements of type `T`.
    ///
    /// If the [memory usage is expressed as a number of
    /// bytes](MemoryUsage::MemorySize), this method divides the number of bytes
    /// by the size of `T` to obtain the number of elements. Otherwise, [it just
    /// returns the specified batch size](MemoryUsage::BatchSize).
    pub fn batch_size<T>(&self) -> usize {
        match &self {
            MemoryUsage::MemorySize(memory_size) => {
                let elem_size = std::mem::size_of::<T>();
                assert!(elem_size > 0, "Element size cannot be zero");
                memory_size / elem_size
            }
            MemoryUsage::BatchSize(batch_size) => *batch_size,
        }
    }
}
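
// Illustrative sketch (added for this write-up, not part of the original file):
// choosing a memory budget, splitting it across workers, and turning it into a
// batch size. The figures (1 GiB, 8 workers, `(usize, usize)` elements) are made up.
//
//     let usage = MemoryUsage::MemorySize(1 << 30);           // 1 GiB overall
//     let per_worker = usage / 8;                             // scale down per worker
//     let batch = per_worker.batch_size::<(usize, usize)>();  // elements per batch
//     assert_eq!(batch, (1 << 30) / 8 / std::mem::size_of::<(usize, usize)>());
//
// `MemoryUsage::default()` (half of the physical RAM) or, say,
// `MemoryUsage::from_perc(25.0)` can be used instead of a fixed byte count.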

/// Returns a human-readable representation of a large number using SI prefixes.
pub fn humanize(value: f64) -> String {
    const UNITS: &[&str] = &["", "K", "M", "G", "T", "P", "E"];
    let mut v = value;
    let mut unit: usize = 0;
    while v >= 1000.0 && unit + 1 < UNITS.len() {
        v /= 1000.0;
        unit += 1;
    }
    if unit == 0 {
        format!("{:.0}{}", v, UNITS[unit])
    } else {
        format!("{:.3}{}", v, UNITS[unit])
    }
}
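
// Illustrative sketch (added for this write-up, not part of the original file):
// a few sample inputs and the strings `humanize` produces for them.
//
//     assert_eq!(humanize(999.0), "999");
//     assert_eq!(humanize(1_500.0), "1.500K");
//     assert_eq!(humanize(2_000_000.0), "2.000M");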

/// A structure holding partition iterators and their boundaries.
///
/// This type holds a list of iterators and a list of boundaries, one more
/// than the number of iterators. The implied semantics is that each iterator
/// returns (labelled) pairs of nodes, and that the first element of
/// each pair sits between the boundaries associated with the iterator.
///
/// This structure is returned by [`ParSortPairs`] and [`ParSortIters`] and can
/// easily be converted into lenders for use with
/// [`BvCompConfig::par_comp_lenders`](crate::graphs::bvgraph::BvCompConfig::par_comp_lenders)
/// using a convenient implementation of the [`From`] trait.
///
/// Note that it is sufficient to write `let lenders: Vec<_> = split_iters.into()`
/// to perform the conversion. Type inference might not work properly if the
/// call is embedded in a larger expression, in which case an explicit type
/// annotation might be necessary.
pub struct SplitIters<I> {
    pub boundaries: Box<[usize]>,
    pub iters: Box<[I]>,
}

impl<I> SplitIters<I> {
    pub fn new(boundaries: Box<[usize]>, iters: Box<[I]>) -> Self {
        Self { boundaries, iters }
    }
}

impl<I> From<(Box<[usize]>, Box<[I]>)> for SplitIters<I> {
    fn from((boundaries, iters): (Box<[usize]>, Box<[I]>)) -> Self {
        Self::new(boundaries, iters)
    }
}

/// Conversion of a [`SplitIters`] of iterators on unlabeled pairs into a
/// sequence of pairs of starting points and associated lenders.
///
/// This is useful for converting the output of sorting utilities like
/// [`ParSortPairs`] or [`ParSortIters`] into a form suitable for
/// [`BvCompConfig::par_comp_lenders`](crate::graphs::bvgraph::BvCompConfig::par_comp_lenders)
/// when working with unlabeled graphs.
///
/// The pairs `(src, dst)` are automatically converted to labeled form with unit
/// labels, and the resulting lenders are wrapped with
/// [`LeftIterator`](crate::labels::proj::LeftIterator) to project out just the
/// successor nodes.
///
/// Note that it is sufficient to write `let lenders: Vec<_> = split_iters.into()`
/// to perform the conversion. Type inference might not work properly if the
/// call is embedded in a larger expression, in which case an explicit type
/// annotation might be necessary.
impl<
    I: Iterator<Item = (usize, usize)> + Send + Sync,
    IT: IntoIterator<Item = (usize, usize), IntoIter = I>,
> From<SplitIters<IT>>
    for Vec<
        crate::labels::proj::LeftIterator<
            Iter<(), std::iter::Map<I, fn((usize, usize)) -> ((usize, usize), ())>>,
        >,
    >
{
    fn from(split: SplitIters<IT>) -> Self {
        Box::into_iter(split.iters)
            .enumerate()
            .map(|(i, iter)| {
                let start_node = split.boundaries[i];
                let end_node = split.boundaries[i + 1];
                let num_partition_nodes = end_node - start_node;
                // Map pairs to labeled pairs with unit labels
                #[allow(clippy::type_complexity)]
                let map_fn: fn((usize, usize)) -> ((usize, usize), ()) = |pair| (pair, ());
                let labeled_iter = iter.into_iter().map(map_fn);
                let lender = Iter::try_new_from(num_partition_nodes, labeled_iter, start_node)
                    .expect("Iterator should start from the expected first node");
                // Wrap with LeftIterator to project out just the successor
                crate::labels::proj::LeftIterator(lender)
            })
            .collect()
    }
}
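
// Illustrative sketch (added for this write-up, not part of the original file):
// converting a `SplitIters` over unlabeled pairs into lenders as described
// above. The two partitions and their arcs are made-up data.
//
//     let split = SplitIters::new(
//         vec![0, 2, 4].into_boxed_slice(),        // boundaries
//         vec![
//             vec![(0, 1), (1, 0)],                // arcs with source in [0, 2)
//             vec![(2, 3), (3, 2)],                // arcs with source in [2, 4)
//         ]
//         .into_boxed_slice(),
//     );
//     let lenders: Vec<_> = split.into();
//     // `lenders` can now be passed to `BvCompConfig::par_comp_lenders`.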

/// Conversion of a [`SplitIters`] of iterators on labelled pairs into a
/// sequence of pairs of starting points and associated lenders.
///
/// This is useful for converting the output of sorting utilities like
/// [`ParSortPairs`] or [`ParSortIters`] into a form suitable for
/// [`BvCompConfig::par_comp_lenders`](crate::graphs::bvgraph::BvCompConfig::par_comp_lenders).
///
/// Note that it is sufficient to write `let lenders: Vec<_> = split_iters.into()`
/// to perform the conversion. Type inference might not work properly if the
/// call is embedded in a larger expression, in which case an explicit type
/// annotation might be necessary.
impl<
    L: Clone + Copy + 'static,
    I: Iterator<Item = ((usize, usize), L)> + Send + Sync,
    IT: IntoIterator<Item = ((usize, usize), L), IntoIter = I>,
> From<SplitIters<IT>> for Vec<Iter<L, I>>
{
    fn from(split: SplitIters<IT>) -> Self {
        Box::into_iter(split.iters)
            .enumerate()
            .map(|(i, iter)| {
                let start_node = split.boundaries[i];
                let end_node = split.boundaries[i + 1];
                let num_partition_nodes = end_node - start_node;

                Iter::try_new_from(num_partition_nodes, iter.into_iter(), start_node)
                    .expect("Iterator should start from the expected first node")
            })
            .collect()
    }
}
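
// Illustrative sketch (added for this write-up, not part of the original file):
// the labelled variant, where each arc carries a label (a `u32` here; the data
// is made up).
//
//     let split = SplitIters::new(
//         vec![0, 2].into_boxed_slice(),
//         vec![vec![((0, 1), 7u32), ((1, 0), 9u32)]].into_boxed_slice(),
//     );
//     let lenders: Vec<_> = split.into();
//     // Each lender yields the nodes in [0, 2) with their labelled successors,
//     // ready for `BvCompConfig::par_comp_lenders`.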