• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vigna / sux-rs / 15193941128

22 May 2025 06:19PM UTC coverage: 67.823% (-1.9%) from 69.748%
15193941128

push

github

web-flow
Merge pull request #68 from progval/fix-build

Fix build

2 of 4 new or added lines in 1 file covered. (50.0%)

248 existing lines in 3 files now uncovered.

3558 of 5246 relevant lines covered (67.82%)

122388451.36 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

75.0
/src/bits/bit_field_vec.rs
1
/*
2
 * SPDX-FileCopyrightText: 2023 Inria
3
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
4
 *
5
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
6
 */
7

8
//! Vectors of values of fixed bit width.
9
//!
10
//! Elements are stored contiguously, with no padding bits (in particular,
11
//! unless the bit width is a power of two some elements will be stored across
12
//! word boundaries).
13
//!
14
//! There are two flavors: [`BitFieldVec`], a mutable bit-field vector, and
15
//! [`AtomicBitFieldVec`], a mutable, thread-safe bit-field vector.
16
//!
17
//! These flavors depend on a backend, and presently we provide, given an
18
//! unsigned integer type `W` or an unsigned atomic integer type `A`:
19
//!
20
//! - `BitFieldVec<Vec<T>>`: a mutable, growable and resizable bit-field vector;
21
//! - `BitFieldVec<AsRef<[W]>>`: an immutable bit-field vector, useful for
22
//!   [ε-serde](epserde) support;
23
//! - `BitFieldVec<AsRef<[W]> + AsMut<[W]>>`: a mutable (but not resizable) bit
24
//!   vector;
25
//! - `AtomicBitFieldVec<AsRef<[A]>>`: a partially thread-safe, mutable (but not
26
//!   resizable) bit-field vector.
27
//!
28
//! More generally, the underlying type must satisfy the trait [`Word`] for
29
//! [`BitFieldVec`] and additionally [`IntoAtomic`] for [`AtomicBitFieldVec`].
30
//! A blanket implementation exposes slices of elements of type `W` as bit-field
31
//! vectors of width `W::BITS`, analogously for atomic types `A`.
32
//!
33
//! Note that nothing is assumed about the content of the backend outside the
34
//! bits of the vector. Moreover, the content of the backend outside of the
35
//! vector is never modified by the methods of this structure.
36
//!
37
//! For high-speed unchecked scanning, we implement [`IntoUncheckedIterator`]
38
//! and [`IntoReverseUncheckedIterator`] on a reference to this type. They are
39
//! used, for example, to provide
40
//! [predecessor](crate::traits::indexed_dict::Pred) and
41
//! [successor](crate::traits::indexed_dict::Succ) primitives for
42
//! [`EliasFano`].
43
//!
44
//! # Low-level support
45
//!
46
//! The methods [`addr_of`](BitFieldVec::addr_of) and
47
//! [`get_unaligned`](BitFieldVec::get_unaligned) can be used to manually
48
//! prefetch parts of the data structure, or read values using unaligned read,
49
//! when the bit width makes it possible.
50
//!
51
//! # Examples
52
//! ```rust
53
//! use sux::prelude::*;
54
//!
55
//! // Bit field vector of bit width 5 and length 10, all entries set to zero
56
//! let mut b = <BitFieldVec<usize>>::new(5, 10);
57
//! assert_eq!(b.len(), 10);
58
//! assert_eq!(b.bit_width(), 5);
59
//! b.set(0, 3);
60
//! assert_eq!(b.get(0), 3);
61
//!
62
//! // Empty bit field vector of bit width 20 with capacity 10
63
//! let mut b = <BitFieldVec<usize>>::with_capacity(20, 10);
64
//! assert_eq!(b.len(), 0);
65
//! assert_eq!(b.bit_width(), 20);
66
//! b.push(20);
67
//! assert_eq!(b.len(), 1);
68
//! assert_eq!(b.get(0), 20);
69
//! assert_eq!(b.pop(), Some(20));
70
//!
71
//! // Convenience macro
72
//! let b = bit_field_vec![10; 4, 500, 2, 0, 1];
73
//! assert_eq!(b.len(), 5);
74
//! assert_eq!(b.bit_width(), 10);
75
//! assert_eq!(b.get(0), 4);
76
//! assert_eq!(b.get(1), 500);
77
//! assert_eq!(b.get(2), 2);
78
//! assert_eq!(b.get(3), 0);
79
//! assert_eq!(b.get(4), 1);
80
//! ```
81

82
use crate::prelude::*;
83
use crate::traits::bit_field_slice::{panic_if_out_of_bounds, panic_if_value};
84
use crate::utils::{transmute_boxed_slice, transmute_vec};
85
#[cfg(feature = "rayon")]
86
use crate::RAYON_MIN_LEN;
87
use anyhow::{bail, Result};
88
use common_traits::*;
89
use epserde::*;
90
use mem_dbg::*;
91
#[cfg(feature = "rayon")]
92
use rayon::prelude::*;
93
use std::sync::atomic::*;
94

95
/// Convenient, [`vec!`]-like macro to initialize `usize`-based bit-field
96
/// vectors.
97
///
98
/// Note that the syntax `bit_field_vec![width; length; value]` has been
99
/// deprecated in favor of `bit_field_vec![width => value; length]`, so that
100
/// value and length are in the same order as in [`vec!`].
101
///
102
/// # Examples
103
///
104
/// ```rust
105
/// use sux::prelude::*;
106
///
107
/// // Empty bit field vector of bit width 5
108
/// let b = bit_field_vec![5];
109
/// assert_eq!(b.len(), 0);
110
/// assert_eq!(b.bit_width(), 5);
111
///
112
/// // 10 values of bit width 6, all set to 3
113
/// let b = bit_field_vec![6 => 3; 10];
114
/// assert_eq!(b.len(), 10);
115
/// assert_eq!(b.bit_width(), 6);
116
/// assert_eq!(b.iter().all(|x| x == 3), true);
117
///
118
/// // List of values of bit width 10
119
/// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
120
/// assert_eq!(b.len(), 5);
121
/// assert_eq!(b.bit_width(), 10);
122
/// assert_eq!(b.get(0), 4);
123
/// assert_eq!(b.get(1), 500);
124
/// assert_eq!(b.get(2), 2);
125
/// assert_eq!(b.get(3), 0);
126
/// assert_eq!(b.get(4), 1);
127
/// ```
128
#[macro_export]
macro_rules! bit_field_vec {
    // `bit_field_vec![w]`: empty vector of bit width `w`.
    ($w:expr) => {
        $crate::bits::BitFieldVec::<usize, _>::new($w, 0)
    };
    // `bit_field_vec![w; n; v]`: deprecated form (length before value);
    // kept for backward compatibility.
    ($w:expr; $n:expr; $v:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![w => v; n]`: `n` copies of `v`, mirroring `vec![v; n]`.
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![w; a, b, c, …]`: explicit list of values of bit width `w`.
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, [$($x),+].len());
            $(
                // Force type
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}
163

164
/// A vector of bit fields of fixed width.
///
/// Values are stored contiguously in the backend `B`, each occupying exactly
/// `bit_width` bits; see the module documentation for the available backends.
#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
pub struct BitFieldVec<W: Word = usize, B = Vec<W>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}
176

177
fn mask<W: Word>(bit_width: usize) -> W {
9,656✔
178
    if bit_width == 0 {
9,656✔
179
        W::ZERO
192✔
180
    } else {
181
        W::MAX >> (W::BITS - bit_width)
9,464✔
182
    }
183
}
184

185
impl<W: Word, B> BitFieldVec<W, B> {
    /// Creates a bit-field vector directly from a backend, a bit width, and a
    /// length, without any validation.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            // The mask is recomputed from the width rather than taken as a
            // parameter, so it can never be inconsistent.
            mask: mask(bit_width),
            len,
        }
    }

    /// Consumes the vector, returning the backend, the bit width, and the
    /// length (the inverse of [`from_raw_parts`](Self::from_raw_parts)).
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    #[inline(always)]
    /// Modifies the bit field in place.
    ///
    /// # Safety
    /// This is unsafe because it's the caller's responsibility to ensure
    /// that the length is compatible with the modified bits.
    pub unsafe fn map<W2: Word, B2>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<W2, B2> {
        BitFieldVec {
            bits: f(self.bits),
            bit_width: self.bit_width,
            mask: mask(self.bit_width),
            len: self.len,
        }
    }
}
219

220
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
221
    /// Gets the address of the item storing (the first part of)
222
    /// the element of given index.
223
    ///
224
    /// This method is mainly useful for manually prefetching
225
    /// parts of the data structure.
226
    pub fn addr_of(&self, index: usize) -> *const W {
4✔
227
        let start_bit = index * self.bit_width;
8✔
228
        let word_index = start_bit / W::BITS;
8✔
229
        (&self.bits.as_ref()[word_index]) as *const _
8✔
230
    }
231

232
    /// Like [`BitFieldSlice::get`], but using unaligned reads.
233
    ///
234
    /// This method can be used only for bit width smaller than or equal to
235
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
236
    /// an additional padding word must be present at the end of the vector.
237
    ///
238
    /// Note that to guarantee the absence of undefined behavior this method
239
    /// has to perform several tests. Consider using
240
    /// [`get_unaligned_unchecked`](Self::get_unaligned_unchecked) if you are
241
    /// sure that the constraints are respected.
242
    ///
243
    /// # Panics
244
    ///
245
    /// This method will panic if the constraints above are not respected.
246
    pub fn get_unaligned(&self, index: usize) -> W {
65✔
247
        assert!(
65✔
248
            self.bit_width <= W::BITS - 8 + 2
65✔
249
                || self.bit_width == W::BITS - 8 + 4
24✔
250
                || self.bit_width == W::BITS
14✔
251
        );
252
        panic_if_out_of_bounds!(index, self.len);
61✔
253
        assert!(
61✔
254
            (index * self.bit_width) / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES
122✔
255
        );
256
        unsafe { self.get_unaligned_unchecked(index) }
180✔
257
    }
258

259
    /// Like [`BitFieldSlice::get_unchecked`], but using unaligned reads.
260
    ///
261
    /// # Safety
262
    ///
263
    /// This method can be used only for bit width smaller than or equal to
264
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
265
    /// an additional padding word must be present at the end of the vector.
266
    ///
267
    /// # Panics
268
    ///
269
    /// This method will panic in debug mode if the safety constraints are not
270
    /// respected.
271
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> W {
65✔
272
        debug_assert!(
65✔
273
            self.bit_width <= W::BITS - 8 + 2
65✔
274
                || self.bit_width == W::BITS - 8 + 4
24✔
275
                || self.bit_width == W::BITS
14✔
276
        );
277
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
61✔
278
        let start_bit = index * self.bit_width;
×
279
        debug_assert!(start_bit / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES);
×
280
        let ptr = base_ptr.add(start_bit / W::BYTES) as *const W;
180✔
281
        let word = core::ptr::read_unaligned(ptr);
180✔
282
        (word >> (start_bit % 8)) & self.mask
60✔
283
    }
284

285
    /// Returns the backend of the vector as a slice of `W`.
286
    pub fn as_slice(&self) -> &[W] {
3✔
287
        self.bits.as_ref()
3✔
288
    }
289
}
290
/// An iterator over non-overlapping chunks of a bit-field vector, starting at
/// the beginning of the vector.
///
/// When the vector len is not evenly divided by the chunk size, the last chunk
/// of the iteration will be shorter.
///
/// This struct is created by the
/// [`try_chunks_mut`](#impl-BitFieldSliceMut-for-BitFieldVec) method.
pub struct ChunksMut<'a, W: Word> {
    // Number of elements not yet yielded.
    remaining: usize,
    // Bit width of each element.
    bit_width: usize,
    // Maximum number of elements per chunk.
    chunk_size: usize,
    // Underlying word-level chunk iterator over the backend.
    iter: std::slice::ChunksMut<'a, W>,
}
304

305
impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
306
    type Item = BitFieldVec<W, &'a mut [W]>;
307

308
    #[inline]
309
    fn next(&mut self) -> Option<Self::Item> {
90✔
310
        self.iter.next().map(|chunk| {
359✔
311
            let size = Ord::min(self.chunk_size, self.remaining);
356✔
312
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
445✔
313
            self.remaining -= size;
89✔
314
            next
89✔
315
        })
316
    }
317
}
318

319
// NOTE(review): an empty `impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {}`
// block was removed here; it declared no items and duplicated the inherent
// impl above.
320

321
impl<W: Word> BitFieldVec<W, Vec<W>> {
    /// Creates a new zero-initialized vector of given bit width and length.
    pub fn new(bit_width: usize, len: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates a new zero-initialized vector of given bit width and length,
    /// adding padding bits to the end of the vector so that unaligned reads are
    /// possible.
    ///
    /// Note that this convenience method is a one-off: if the vector is resized
    /// or expanded, the padding will be lost.
    pub fn new_unaligned(bit_width: usize, len: usize) -> Self {
        let n_of_words = (len * bit_width).div_ceil(W::BITS);
        Self {
            // We add a word at the end so that an unaligned read of the last
            // element cannot run past the backend.
            bits: vec![W::ZERO; n_of_words + 1],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates an empty vector that doesn't need to reallocate for up to
    /// `capacity` elements.
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }

    /// Sets the length.
    ///
    /// # Safety
    ///
    /// `len * bit_width` must be at most `self.bits.len() * W::BITS`. Note that
    /// setting the length might result in reading uninitialized data.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS);
        self.len = len;
    }

    /// Sets the length to zero, keeping the allocated backend.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns the bit width of the values inside the vector.
    pub fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }

    /// Creates a new vector by copying a slice; the bit width will be the minimum
    /// width sufficient to hold all values in the slice.
    ///
    /// Returns an error if the bit width of the values in `slice` is larger than
    /// `W::BITS`.
    pub fn from_slice<SW>(slice: &impl BitFieldSlice<SW>) -> Result<Self>
    where
        SW: Word + CastableInto<W>,
    {
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            // NOTE(review): `len()` here presumably returns the bit length of
            // the value (from `common_traits`) — confirm against that crate.
            max_len = Ord::max(max_len, unsafe { slice.get_unchecked(i).len() as usize });
        }

        if max_len > W::BITS {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            // SAFETY: `i` is within bounds and each value fits in `max_len` bits.
            unsafe { result.set_unchecked(i, slice.get_unchecked(i).cast()) };
        }

        Ok(result)
    }

    /// Adds a value at the end of the vector.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not fit in `bit_width` bits.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        // Grow the backend by one word only if the new element does not fit.
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS {
            self.bits.push(W::ZERO);
        }
        unsafe {
            // SAFETY: the backend now has room for the element at `self.len`.
            self.set_unchecked(self.len, value);
        }
        self.len += 1;
    }

    /// Truncates the vector, or extends it with copies of `value`.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not fit in `bit_width` bits.
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            // Enlarge the backend first, if needed.
            if new_len * self.bit_width > self.bits.len() * W::BITS {
                self.bits
                    .resize((new_len * self.bit_width).div_ceil(W::BITS), W::ZERO);
            }
            for i in self.len..new_len {
                unsafe {
                    // SAFETY: `i < new_len` and the backend has been resized.
                    self.set_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }

    /// Removes and returns a value from the end of the vector.
    ///
    /// Returns None if the [`BitFieldVec`] is empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.get(self.len - 1);
        self.len -= 1;
        Some(value)
    }
}
462

463
impl<W: Word, T> BitFieldSliceCore<W> for BitFieldVec<W, T> {
    /// Returns the fixed width, in bits, of each element.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        // The width can never exceed the backend word size.
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the number of elements in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
474

475
impl<W: Word, B: AsRef<[W]>> BitFieldSlice<W> for BitFieldVec<W, B> {
    /// Reads the element at `index` without bounds checking.
    unsafe fn get_unchecked(&self, index: usize) -> W {
        // Absolute bit position of the field, and its word/bit coordinates.
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // The field is contained in a single word.
            (*bits.get_unchecked(word_index) >> bit_index) & self.mask
        } else {
            // The field straddles two words: combine the high bits of the
            // first word with the low bits of the second.
            ((*bits.get_unchecked(word_index) >> bit_index)
                | (*bits.get_unchecked(word_index + 1) << (W::BITS - bit_index)))
                & self.mask
        }
    }
}
491

492
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldSliceMut<W> for BitFieldVec<W, B> {
493
    /// Returns the mask keeping the lowest `bit_width` bits.
    #[inline(always)]
    fn mask(&self) -> W {
        self.mask
    }
497

498
    /// Sets the element at `index` to `value`, checking both the index and
    /// that the value fits in `bit_width` bits.
    #[inline(always)]
    fn set(&mut self, index: usize, value: W) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            // SAFETY: `index` is in bounds and `value` fits in the field.
            self.set_unchecked(index, value);
        }
    }
506

507
    /// Writes `value` at `index` without bounds or value checks.
    unsafe fn set_unchecked(&mut self, index: usize, value: W) {
        // Absolute bit position of the field, and its word/bit coordinates.
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        if bit_index + self.bit_width <= W::BITS {
            // The field fits in a single word: clear it, then OR in the value.
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= !(self.mask << bit_index);
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;
        } else {
            // The field straddles two words. First word: keep the low
            // `bit_index` bits and write the low part of the value above them.
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= (W::ONE << bit_index) - W::ONE;
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;

            // Second word: clear the bits belonging to the field and write
            // the high part of the value.
            let mut word = *bits.get_unchecked_mut(word_index + 1);
            word &= !(self.mask >> (W::BITS - bit_index));
            word |= value >> (W::BITS - bit_index);
            *bits.get_unchecked_mut(word_index + 1) = word;
        }
    }
530

531
    fn reset(&mut self) {
1✔
532
        let bit_len = self.len * self.bit_width;
2✔
533
        let full_words = bit_len / W::BITS;
2✔
534
        let residual = bit_len % W::BITS;
2✔
535
        let bits = self.bits.as_mut();
3✔
536
        bits[..full_words].iter_mut().for_each(|x| *x = W::ZERO);
3✔
537
        if residual != 0 {
2✔
538
            bits[full_words] &= W::MAX << residual;
1✔
539
        }
540
    }
541

542
    #[cfg(feature = "rayon")]
543
    fn par_reset(&mut self) {
1✔
544
        let bit_len = self.len * self.bit_width;
2✔
545
        let full_words = bit_len / W::BITS;
2✔
546
        let residual = bit_len % W::BITS;
2✔
547
        let bits = self.bits.as_mut();
3✔
548
        bits[..full_words]
1✔
549
            .par_iter_mut()
550
            .with_min_len(RAYON_MIN_LEN)
1✔
551
            .for_each(|x| *x = W::ZERO);
1✔
552
        if residual != 0 {
2✔
553
            bits[full_words] &= W::MAX << residual;
1✔
554
        }
555
    }
556

557
    /// This implementation performs the copy word by word, which is
    /// significantly faster than the default implementation.
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        // Reduce len to the elements available in both vectors
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        // Bit coordinates of the source and destination ranges.
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let src_bit = src_pos % W::BITS;
        let dst_bit = dst_pos % W::BITS;
        let src_first_word = src_pos / W::BITS;
        let dst_first_word = dst_pos / W::BITS;
        let src_last_word = (src_pos + bit_len - 1) / W::BITS;
        let dst_last_word = (dst_pos + bit_len - 1) / W::BITS;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();

        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            // Case 1: both ranges fit in a single word.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // Case 2: the source fits in one word but the destination spans
            // two; split the value across the two destination words.
            // dst_first_word != dst_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (W::BITS - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (W::BITS - dst_bit);
        } else if dst_first_word == dst_last_word {
            // Case 3: the source spans two words but the destination fits in
            // one; recombine the source halves, then write once.
            // src_first_word != src_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (W::BITS - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // Case 4: multi-word ranges with identical in-word offsets; the
            // middle words can be copied verbatim.
            // src_first_word != src_last_word && dst_first_word != dst_last_word
            let mask = W::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;

            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);

            // Bits still to be copied into the last destination word.
            let residual =
                bit_len - (W::BITS - src_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // Case 5: multi-word ranges, destination offset larger than the
            // source offset; each destination word combines the carried-over
            // high bits of the previous source word with the next one.
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;

            let mut word = source[src_first_word] >> (W::BITS - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (W::BITS - shift);
            }
            // Bits still to be copied into the last destination word.
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else {
            // Case 6: multi-word ranges, source offset larger than the
            // destination offset; each destination word needs bits from two
            // consecutive source words.
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word && src_bit > dst_bit
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (W::BITS - shift);

            let mut word = source[src_first_word + 1] >> shift;

            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (W::BITS - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }

            word |= source[src_last_word] << (W::BITS - shift);

            // Bits still to be copied into the last destination word.
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }
664

665
    /// This implementation keeps a buffer of `W::BITS` bits for reading and
666
    /// writing, obtaining a significant speedup with respect to the default
667
    /// implementation.
668
    #[inline]
669
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
1,200✔
670
    where
671
        F: FnMut(W) -> W,
672
        Self: BitFieldSlice<W>,
673
    {
674
        if self.is_empty() {
2,400✔
675
            return;
×
676
        }
677
        let bit_width = self.bit_width();
×
678
        if bit_width == 0 {
×
679
            return;
40✔
680
        }
681
        let mask = self.mask();
×
682
        let number_of_words: usize = self.bits.as_ref().len();
×
683
        let last_word_idx = number_of_words.saturating_sub(1);
×
684

685
        let mut write_buffer: W = W::ZERO;
×
686
        let mut read_buffer: W = *self.bits.as_ref().get_unchecked(0);
×
687

688
        // specialized case because it's much faster
689
        if bit_width.is_power_of_two() {
×
690
            let mut bits_in_buffer = 0;
360✔
691

692
            // TODO!: figure out how to simplify
693
            let mut buffer_limit = (self.len() * bit_width) % W::BITS;
540✔
694
            if buffer_limit == 0 {
260✔
695
                buffer_limit = W::BITS;
80✔
696
            }
697

698
            for read_idx in 1..number_of_words {
3,830✔
699
                // pre-load the next word so it loads while we parse the buffer
700
                let next_word: W = *self.bits.as_ref().get_unchecked(read_idx);
14,600✔
701

702
                // parse as much as we can from the buffer
703
                loop {
×
704
                    let next_bits_in_buffer = bits_in_buffer + bit_width;
41,380✔
705

706
                    if next_bits_in_buffer > W::BITS {
20,690✔
707
                        break;
3,650✔
708
                    }
709

710
                    let value = read_buffer & mask;
×
711
                    // throw away the bits we just read
712
                    read_buffer >>= bit_width;
×
713
                    // apply user func
714
                    let new_value = f(value);
×
715
                    // put the new value in the write buffer
716
                    write_buffer |= new_value << bits_in_buffer;
×
717

718
                    bits_in_buffer = next_bits_in_buffer;
×
719
                }
720

721
                invariant_eq!(read_buffer, W::ZERO);
10,950✔
722
                *self.bits.as_mut().get_unchecked_mut(read_idx - 1) = write_buffer;
3,650✔
723
                read_buffer = next_word;
×
724
                write_buffer = W::ZERO;
×
725
                bits_in_buffer = 0;
×
726
            }
727

728
            // write the last word if we have some bits left
729
            while bits_in_buffer < buffer_limit {
2,100✔
730
                let value = read_buffer & mask;
960✔
731
                // throw away the bits we just read
732
                read_buffer >>= bit_width;
960✔
733
                // apply user func
734
                let new_value = f(value);
960✔
735
                // put the new value in the write buffer
736
                write_buffer |= new_value << bits_in_buffer;
960✔
737
                // -= bit_width but with no casts
738
                bits_in_buffer += bit_width;
960✔
739
            }
740

741
            *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
180✔
742
            return;
×
743
        }
744

745
        // The position inside the word. In most parametrization of the
746
        // vector, since the bit_width is not necessarily a integer
747
        // divisor of the word size, we need to keep track of the position
748
        // inside the word. As we scroll through the bits, due to the bits
749
        // remainder, we may need to operate on two words at the same time.
750
        let mut global_bit_index: usize = 0;
×
751

752
        // The number of words in the bitvec.
753
        let mut lower_word_limit = 0;
×
754
        let mut upper_word_limit = W::BITS;
×
755

756
        // We iterate across the words
757
        for word_number in 0..last_word_idx {
53,710✔
758
            // We iterate across the elements in the word.
759
            while global_bit_index + bit_width <= upper_word_limit {
149,910✔
760
                // We retrieve the value from the current word.
761
                let offset = global_bit_index - lower_word_limit;
48,100✔
762
                global_bit_index += bit_width;
48,100✔
763
                let element = self.mask() & (read_buffer >> offset);
48,100✔
764

765
                // We apply the function to the element.
766
                let new_element = f(element);
48,100✔
767

768
                // We set the element in the new word.
769
                write_buffer |= new_element << offset;
48,100✔
770
            }
771

772
            // We retrieve the next word from the bitvec.
773
            let next_word = *self.bits.as_ref().get_unchecked(word_number + 1);
×
774

775
            let mut new_write_buffer = W::ZERO;
×
776
            if upper_word_limit != global_bit_index {
48,840✔
777
                let remainder = upper_word_limit - global_bit_index;
146,520✔
778
                let offset = global_bit_index - lower_word_limit;
146,520✔
779
                // We compose the element from the remaining elements in the
780
                // current word and the elements in the next word.
781
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask();
244,200✔
782
                global_bit_index += bit_width;
97,680✔
783

784
                // We apply the function to the element.
785
                let new_element = f(element);
146,520✔
786

787
                write_buffer |= new_element << offset;
146,520✔
788

789
                new_write_buffer = new_element >> remainder;
48,840✔
790
            };
791

792
            read_buffer = next_word;
×
793

794
            *self.bits.as_mut().get_unchecked_mut(word_number) = write_buffer;
×
795

796
            write_buffer = new_write_buffer;
×
797
            lower_word_limit = upper_word_limit;
×
798
            upper_word_limit += W::BITS;
×
799
        }
800

801
        let mut offset = global_bit_index - lower_word_limit;
×
802

803
        // We iterate across the elements in the word.
804
        while offset < self.len() * bit_width - global_bit_index {
7,180✔
805
            // We retrieve the value from the current word.
806
            let element = self.mask() & (read_buffer >> offset);
1,060✔
807

808
            // We apply the function to the element.
809
            let new_element = f(element);
1,060✔
810

811
            // We set the element in the new word.
812
            write_buffer |= new_element << offset;
1,060✔
813
            offset += bit_width;
1,060✔
814
        }
815

816
        *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
×
817
    }
818

819
    type ChunksMut<'a>
820
        = ChunksMut<'a, W>
821
    where
822
        Self: 'a;
823

824
    /// # Errors
825
    ///
826
    /// This method will return an error if the chunk size multiplied by the by
827
    /// the [bit width](BitFieldSliceCore::bit_width) is not a multiple of
828
    /// `W::BITS` and more than one chunk must be returned.
829
    fn try_chunks_mut(&mut self, chunk_size: usize) -> Result<Self::ChunksMut<'_>, ()> {
87✔
830
        let len = self.len();
261✔
831
        let bit_width = self.bit_width();
261✔
832
        if len <= chunk_size || (chunk_size * bit_width) % W::BITS == 0 {
92✔
833
            Ok(ChunksMut {
85✔
834
                remaining: len,
85✔
835
                bit_width: self.bit_width,
85✔
836
                chunk_size,
85✔
837
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(W::BITS)]
85✔
838
                    .chunks_mut((chunk_size * bit_width).div_ceil(W::BITS)),
85✔
839
            })
840
        } else {
841
            Err(())
2✔
842
        }
843
    }
844

845
    fn as_mut_slice(&mut self) -> &mut [W] {
1✔
846
        self.bits.as_mut()
1✔
847
    }
848
}
849

850
impl<W: Word> core::iter::Extend<W> for BitFieldVec<W, Vec<W>> {
851
    /// Add values from
852
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
1✔
853
        for value in iter {
17✔
854
            self.push(value);
×
855
        }
856
    }
857
}
858

859
/// Equality between bit-field vectors requires that the word is the same, the
860
/// bit width is the same, and the content is the same.
861
impl<W: Word, B: AsRef<[W]>, C: AsRef<[W]>> PartialEq<BitFieldVec<W, C>> for BitFieldVec<W, B> {
862
    fn eq(&self, other: &BitFieldVec<W, C>) -> bool {
3,771✔
863
        if self.bit_width() != other.bit_width() {
11,313✔
864
            return false;
×
865
        }
866
        if self.len() != other.len() {
×
867
            return false;
2✔
868
        }
869
        let bit_len = self.len() * self.bit_width();
×
870
        if self.bits.as_ref()[..bit_len / W::BITS] != other.bits.as_ref()[..bit_len / W::BITS] {
×
871
            return false;
×
872
        }
873

874
        let residual = bit_len % W::BITS;
×
875
        residual == 0
×
876
            || (self.bits.as_ref()[bit_len / W::BITS] ^ other.bits.as_ref()[bit_len / W::BITS])
3,526✔
877
                << (W::BITS - residual)
3,526✔
878
                == W::ZERO
3,526✔
879
    }
880
}
881

882
impl Eq for BitFieldVec {}
883

884
// Support for unchecked iterators
885

886
/// An [`UncheckedIterator`] over the values of a [`BitFieldVec`].
887
#[derive(Debug, Clone, MemDbg, MemSize)]
888
pub struct BitFieldVectorUncheckedIterator<'a, W, B>
889
where
890
    W: Word,
891
{
892
    vec: &'a BitFieldVec<W, B>,
893
    word_index: usize,
894
    window: W,
895
    fill: usize,
896
}
897

898
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorUncheckedIterator<'a, W, B> {
899
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
635,922✔
900
        if index > vec.len() {
1,271,844✔
901
            panic!("Start index out of bounds: {} > {}", index, vec.len());
×
902
        }
903
        let (fill, word_index);
×
904
        let window = if index == vec.len() {
1,907,766✔
905
            word_index = 0;
304✔
906
            fill = 0;
304✔
907
            W::ZERO
304✔
908
        } else {
909
            let bit_offset = index * vec.bit_width;
635,618✔
910
            let bit_index = bit_offset % W::BITS;
×
911

912
            word_index = bit_offset / W::BITS;
×
913
            fill = W::BITS - bit_index;
×
914
            unsafe {
915
                // SAFETY: index has been check at the start and it is within bounds
916
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
×
917
            }
918
        };
919
        Self {
920
            vec,
921
            word_index,
922
            window,
923
            fill,
924
        }
925
    }
926
}
927

928
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
929
    for BitFieldVectorUncheckedIterator<'_, W, B>
930
{
931
    type Item = W;
932
    unsafe fn next_unchecked(&mut self) -> W {
32,658,435✔
933
        let bit_width = self.vec.bit_width;
65,316,870✔
934

935
        if self.fill >= bit_width {
32,658,435✔
936
            self.fill -= bit_width;
17,182,682✔
937
            let res = self.window & self.vec.mask;
34,365,364✔
938
            self.window >>= bit_width;
17,182,682✔
939
            return res;
17,182,682✔
940
        }
941

942
        let res = self.window;
×
943
        self.word_index += 1;
×
944
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
×
945
        let res = (res | (self.window << self.fill)) & self.vec.mask;
×
946
        let used = bit_width - self.fill;
×
947
        self.window >>= used;
×
948
        self.fill = W::BITS - used;
×
949
        res
×
950
    }
951
}
952

953
impl<'a, W: Word, B: AsRef<[W]>> IntoUncheckedIterator for &'a BitFieldVec<W, B> {
954
    type Item = W;
955
    type IntoUncheckedIter = BitFieldVectorUncheckedIterator<'a, W, B>;
956
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
322,712✔
957
        BitFieldVectorUncheckedIterator::new(self, from)
968,136✔
958
    }
959
}
960

961
/// An [`UncheckedIterator`] moving backwards over the values of a [`BitFieldVec`].
962
#[derive(Debug, Clone, MemDbg, MemSize)]
963
pub struct BitFieldVectorReverseUncheckedIterator<'a, W: Word, B> {
964
    vec: &'a BitFieldVec<W, B>,
965
    word_index: usize,
966
    window: W,
967
    fill: usize,
968
}
969

970
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorReverseUncheckedIterator<'a, W, B> {
971
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
315,992✔
972
        if index > vec.len() {
631,984✔
973
            panic!("Start index out of bounds: {} > {}", index, vec.len());
×
974
        }
975
        let (word_index, fill);
×
976

977
        let window = if index == 0 {
631,984✔
978
            word_index = 0;
3,120✔
979
            fill = 0;
3,120✔
980
            W::ZERO
3,120✔
981
        } else {
982
            // We have to handle the case of zero bit width
983
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
312,872✔
984
            let bit_index = bit_offset % W::BITS;
×
985

986
            word_index = bit_offset / W::BITS;
×
987
            fill = bit_index + 1;
×
988
            unsafe {
989
                // SAFETY: index has been check at the start and it is within bounds
990
                *vec.bits.as_ref().get_unchecked(word_index) << (W::BITS - fill)
×
991
            }
992
        };
993
        Self {
994
            vec,
995
            word_index,
996
            window,
997
            fill,
998
        }
999
    }
1000
}
1001

1002
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
1003
    for BitFieldVectorReverseUncheckedIterator<'_, W, B>
1004
{
1005
    type Item = W;
1006
    unsafe fn next_unchecked(&mut self) -> W {
15,451,222✔
1007
        let bit_width = self.vec.bit_width;
30,902,444✔
1008

1009
        if self.fill >= bit_width {
15,451,222✔
1010
            self.fill -= bit_width;
8,036,076✔
1011
            self.window = self.window.rotate_left(bit_width as u32);
16,072,152✔
1012
            return self.window & self.vec.mask;
8,036,076✔
1013
        }
1014

1015
        let mut res = self.window.rotate_left(self.fill as u32);
×
1016
        self.word_index -= 1;
×
1017
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
×
1018
        let used = bit_width - self.fill;
×
1019
        res = ((res << used) | (self.window >> (W::BITS - used))) & self.vec.mask;
×
1020
        self.window <<= used;
×
1021
        self.fill = W::BITS - used;
×
1022
        res
×
1023
    }
1024
}
1025

1026
impl<'a, W: Word, B: AsRef<[W]>> IntoReverseUncheckedIterator for &'a BitFieldVec<W, B> {
1027
    type Item = W;
1028
    type IntoRevUncheckedIter = BitFieldVectorReverseUncheckedIterator<'a, W, B>;
1029

1030
    fn into_rev_unchecked_iter(self) -> Self::IntoRevUncheckedIter {
×
1031
        BitFieldVectorReverseUncheckedIterator::new(self, self.len())
×
1032
    }
1033

1034
    fn into_rev_unchecked_iter_from(self, from: usize) -> Self::IntoRevUncheckedIter {
315,992✔
1035
        BitFieldVectorReverseUncheckedIterator::new(self, from)
947,976✔
1036
    }
1037
}
1038

1039
/// An [`Iterator`] over the values of a [`BitFieldVec`].
1040
#[derive(Debug, Clone, MemDbg, MemSize)]
1041
pub struct BitFieldVecIterator<'a, W, B>
1042
where
1043
    W: Word,
1044
{
1045
    unchecked: BitFieldVectorUncheckedIterator<'a, W, B>,
1046
    index: usize,
1047
}
1048

1049
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVecIterator<'a, W, B> {
1050
    fn new(vec: &'a BitFieldVec<W, B>, from: usize) -> Self {
313,210✔
1051
        if from > vec.len() {
626,420✔
1052
            panic!("Start index out of bounds: {} > {}", from, vec.len());
×
1053
        }
1054
        Self {
1055
            unchecked: BitFieldVectorUncheckedIterator::new(vec, from),
×
1056
            index: from,
1057
        }
1058
    }
1059
}
1060

1061
impl<W: Word, B: AsRef<[W]>> Iterator for BitFieldVecIterator<'_, W, B> {
1062
    type Item = W;
1063
    fn next(&mut self) -> Option<Self::Item> {
16,189,263✔
1064
        if self.index < self.unchecked.vec.len() {
32,378,526✔
1065
            // SAFETY: index has just been checked.
1066
            let res = unsafe { self.unchecked.next_unchecked() };
47,628,177✔
1067
            self.index += 1;
15,876,059✔
1068
            Some(res)
15,876,059✔
1069
        } else {
1070
            None
313,204✔
1071
        }
1072
    }
1073

1074
    #[inline(always)]
1075
    fn size_hint(&self) -> (usize, Option<usize>) {
×
1076
        (self.len(), Some(self.len()))
×
1077
    }
1078
}
1079

1080
impl<W: Word, B: AsRef<[W]>> ExactSizeIterator for BitFieldVecIterator<'_, W, B> {
1081
    #[inline(always)]
1082
    fn len(&self) -> usize {
×
1083
        self.unchecked.vec.len() - self.index
×
1084
    }
1085
}
1086

1087
impl<'a, W: Word, B: AsRef<[W]>> IntoIterator for &'a BitFieldVec<W, B> {
1088
    type Item = W;
1089
    type IntoIter = BitFieldVecIterator<'a, W, B>;
1090

1091
    fn into_iter(self) -> Self::IntoIter {
2✔
1092
        BitFieldVecIterator::new(self, 0)
4✔
1093
    }
1094
}
1095

1096
impl<'a, W: Word, B: AsRef<[W]>> IntoIteratorFrom for &'a BitFieldVec<W, B> {
1097
    type IntoIterFrom = BitFieldVecIterator<'a, W, B>;
1098

1099
    fn into_iter_from(self, from: usize) -> Self::IntoIter {
×
1100
        BitFieldVecIterator::new(self, from)
×
1101
    }
1102
}
1103

1104
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
1105
    pub fn iter_from(&self, from: usize) -> BitFieldVecIterator<W, B> {
313,208✔
1106
        BitFieldVecIterator::new(self, from)
939,624✔
1107
    }
1108

1109
    pub fn iter(&self) -> BitFieldVecIterator<W, B> {
1,202✔
1110
        self.iter_from(0)
2,404✔
1111
    }
1112
}
1113
/// A tentatively thread-safe vector of bit fields of fixed width.
1114
///
1115
/// This implementation provides some concurrency guarantees, albeit not
1116
/// full-fledged thread safety: more precisely, we can guarantee thread-safety
1117
/// if the bit width is a power of two; otherwise, concurrent writes to values
1118
/// that cross word boundaries might end up in different threads succeding in
1119
/// writing only part of a value. If the user can guarantee that no two threads
1120
/// ever write to the same boundary-crossing value, then no race condition can
1121
/// happen.
1122
///
1123
/// Note that the trait
1124
/// [`AtomicHelper`](crate::traits::bit_field_slice::AtomicHelper) can be used
1125
/// to provide a more convenient naming for some methods.
1126

1127
#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
1128
pub struct AtomicBitFieldVec<W: Word + IntoAtomic = usize, B = Vec<<W as IntoAtomic>::AtomicType>> {
1129
    /// The underlying storage.
1130
    bits: B,
1131
    /// The bit width of the values stored in the vector.
1132
    bit_width: usize,
1133
    /// A mask with its lowest `bit_width` bits set to one.
1134
    mask: W,
1135
    /// The length of the vector.
1136
    len: usize,
1137
}
1138

1139
impl<W: Word + IntoAtomic, B> AtomicBitFieldVec<W, B> {
1140
    /// # Safety
1141
    /// `len` * `bit_width` must be between 0 (included) the number of
1142
    /// bits in `bits` (included).
1143
    #[inline(always)]
1144
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
184✔
1145
        Self {
1146
            bits,
1147
            bit_width,
1148
            mask: mask(bit_width),
368✔
1149
            len,
1150
        }
1151
    }
1152

1153
    #[inline(always)]
1154
    pub fn into_raw_parts(self) -> (B, usize, usize) {
184✔
1155
        (self.bits, self.bit_width, self.len)
368✔
1156
    }
1157

1158
    /// Returns the mask used to extract values from the vector.
1159
    /// This will keep the lowest `bit_width` bits.
1160
    pub fn mask(&self) -> W {
184✔
1161
        self.mask
184✔
1162
    }
1163
}
1164

1165
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B> {
1166
    /// Returns the backend of the `AtomicBitFieldVec` as a slice of `A`, where `A` is the
1167
    /// atomic variant of `W`.
1168
    pub fn as_slice(&self) -> &[W::AtomicType] {
4✔
1169
        self.bits.as_ref()
4✔
1170
    }
1171
}
1172

1173
impl<W: Word + IntoAtomic> AtomicBitFieldVec<W>
1174
where
1175
    W::AtomicType: AtomicUnsignedInt,
1176
{
1177
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<W> {
491✔
1178
        // we need at least two words to avoid branches in the gets
1179
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
1,964✔
1180
        AtomicBitFieldVec::<W> {
1181
            bits: (0..n_of_words)
491✔
1182
                .map(|_| W::AtomicType::new(W::ZERO))
1183
                .collect(),
1184
            bit_width,
1185
            mask: mask(bit_width),
982✔
1186
            len,
1187
        }
1188
    }
1189
}
1190

1191
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B>
1192
where
1193
    W::AtomicType: AtomicUnsignedInt + AsBytes,
1194
{
1195
    /// Writes zeros in all values.
1196
    #[deprecated(since = "0.4.4", note = "reset is deprecated in favor of reset_atomic")]
1197
    pub fn reset(&mut self, ordering: Ordering) {
×
1198
        self.reset_atomic(ordering)
×
1199
    }
1200
}
1201

1202
impl<W: Word + IntoAtomic, B> BitFieldSliceCore<W::AtomicType> for AtomicBitFieldVec<W, B> {
1203
    #[inline(always)]
1204
    fn bit_width(&self) -> usize {
184✔
1205
        debug_assert!(self.bit_width <= W::BITS);
368✔
1206
        self.bit_width
184✔
1207
    }
1208

1209
    #[inline(always)]
1210
    fn len(&self) -> usize {
404,824✔
1211
        self.len
404,824✔
1212
    }
1213
}
1214

1215
impl<W: Word + IntoAtomic, T: AsRef<[W::AtomicType]>> AtomicBitFieldSlice<W>
1216
    for AtomicBitFieldVec<W, T>
1217
where
1218
    W::AtomicType: AtomicUnsignedInt + AsBytes,
1219
{
1220
    #[inline]
1221
    unsafe fn get_atomic_unchecked(&self, index: usize, order: Ordering) -> W {
404,825✔
1222
        let pos = index * self.bit_width;
809,650✔
1223
        let word_index = pos / W::BITS;
809,650✔
1224
        let bit_index = pos % W::BITS;
809,650✔
1225
        let bits = self.bits.as_ref();
1,214,475✔
1226

1227
        if bit_index + self.bit_width <= W::BITS {
404,825✔
1228
            (bits.get_unchecked(word_index).load(order) >> bit_index) & self.mask
939,176✔
1229
        } else {
1230
            ((bits.get_unchecked(word_index).load(order) >> bit_index)
170,031✔
1231
                | (bits.get_unchecked(word_index + 1).load(order) << (W::BITS - bit_index)))
170,031✔
1232
                & self.mask
170,031✔
1233
        }
1234
    }
1235

1236
    // We reimplement set as we have the mask in the structure.
1237

1238
    /// Sets the element of the slice at the specified index.
1239
    ///
1240
    /// May panic if the index is not in in [0..[len](`BitFieldSliceCore::len`))
1241
    /// or the value does not fit in [`BitFieldSliceCore::bit_width`] bits.
1242
    #[inline(always)]
1243
    fn set_atomic(&self, index: usize, value: W, order: Ordering) {
184,005✔
1244
        panic_if_out_of_bounds!(index, self.len);
552,015✔
1245
        panic_if_value!(value, self.mask, self.bit_width);
552,012✔
1246
        unsafe {
1247
            self.set_atomic_unchecked(index, value, order);
184,003✔
1248
        }
1249
    }
1250

1251
    #[inline]
1252
    unsafe fn set_atomic_unchecked(&self, index: usize, value: W, order: Ordering) {
451,171✔
1253
        debug_assert!(self.bit_width != W::BITS);
902,342✔
1254
        let pos = index * self.bit_width;
902,342✔
1255
        let word_index = pos / W::BITS;
902,342✔
1256
        let bit_index = pos % W::BITS;
902,342✔
1257
        let bits = self.bits.as_ref();
1,353,513✔
1258

1259
        if bit_index + self.bit_width <= W::BITS {
451,171✔
1260
            // this is consistent
1261
            let mut current = bits.get_unchecked(word_index).load(order);
1,300,230✔
1262
            loop {
×
1263
                let mut new = current;
520,092✔
1264
                new &= !(self.mask << bit_index);
520,092✔
1265
                new |= value << bit_index;
520,092✔
1266

1267
                match bits
780,138✔
1268
                    .get_unchecked(word_index)
520,092✔
1269
                    .compare_exchange(current, new, order, order)
1,040,184✔
1270
                {
1271
                    Ok(_) => break,
260,046✔
UNCOV
1272
                    Err(e) => current = e,
×
1273
                }
1274
            }
1275
        } else {
1276
            let mut word = bits.get_unchecked(word_index).load(order);
191,125✔
1277
            // try to wait for the other thread to finish
1278
            fence(Ordering::Acquire);
×
1279
            loop {
×
1280
                let mut new = word;
382,250✔
1281
                new &= (W::ONE << bit_index) - W::ONE;
191,125✔
1282
                new |= value << bit_index;
382,250✔
1283

1284
                match bits
573,375✔
1285
                    .get_unchecked(word_index)
382,250✔
1286
                    .compare_exchange(word, new, order, order)
764,500✔
1287
                {
1288
                    Ok(_) => break,
×
1289
                    Err(e) => word = e,
×
1290
                }
1291
            }
1292
            fence(Ordering::Release);
×
1293

1294
            // ensures that the compiler does not reorder the two atomic operations
1295
            // this should increase the probability of having consistency
1296
            // between two concurrent writes as they will both execute the set
1297
            // of the bits in the same order, and the release / acquire fence
1298
            // should try to syncronize the threads as much as possible
1299
            compiler_fence(Ordering::SeqCst);
×
1300

1301
            let mut word = bits.get_unchecked(word_index + 1).load(order);
×
1302
            fence(Ordering::Acquire);
×
1303
            loop {
×
1304
                let mut new = word;
382,250✔
1305
                new &= !(self.mask >> (W::BITS - bit_index));
382,250✔
1306
                new |= value >> (W::BITS - bit_index);
382,250✔
1307

1308
                match bits
573,375✔
1309
                    .get_unchecked(word_index + 1)
382,250✔
1310
                    .compare_exchange(word, new, order, order)
764,500✔
1311
                {
1312
                    Ok(_) => break,
×
1313
                    Err(e) => word = e,
×
1314
                }
1315
            }
1316
            fence(Ordering::Release);
×
1317
        }
1318
    }
1319

1320
    fn reset_atomic(&mut self, ordering: Ordering) {
1✔
1321
        let bit_len = self.len * self.bit_width;
2✔
1322
        let full_words = bit_len / W::BITS;
2✔
1323
        let residual = bit_len % W::BITS;
2✔
1324
        let bits = self.bits.as_ref();
3✔
1325
        bits[..full_words]
1✔
1326
            .iter()
1327
            .for_each(|x| x.store(W::ZERO, ordering));
22✔
1328
        if residual != 0 {
2✔
1329
            bits[full_words].fetch_and(W::MAX << residual, ordering);
3✔
1330
        }
1331
    }
1332

1333
    #[cfg(feature = "rayon")]
1334
    fn par_reset_atomic(&mut self, ordering: Ordering) {
1✔
1335
        let bit_len = self.len * self.bit_width;
2✔
1336
        let full_words = bit_len / W::BITS;
2✔
1337
        let residual = bit_len % W::BITS;
2✔
1338
        let bits = self.bits.as_ref();
3✔
1339
        bits[..full_words]
1✔
1340
            .par_iter()
1341
            .with_min_len(RAYON_MIN_LEN)
1✔
1342
            .for_each(|x| x.store(W::ZERO, ordering));
22✔
1343
        if residual != 0 {
2✔
1344
            bits[full_words].fetch_and(W::MAX << residual, ordering);
3✔
1345
        }
1346
    }
1347
}
1348

1349
// Conversions
1350

1351
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Vec<W::AtomicType>>>
1352
    for BitFieldVec<W, Vec<W>>
1353
{
1354
    #[inline]
1355
    fn from(value: AtomicBitFieldVec<W, Vec<W::AtomicType>>) -> Self {
470✔
1356
        BitFieldVec {
1357
            bits: unsafe { transmute_vec::<W::AtomicType, W>(value.bits) },
1,410✔
1358
            len: value.len,
940✔
1359
            bit_width: value.bit_width,
470✔
1360
            mask: value.mask,
470✔
1361
        }
1362
    }
1363
}
1364

1365
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Box<[W::AtomicType]>>>
1366
    for BitFieldVec<W, Box<[W]>>
1367
{
1368
    #[inline]
1369
    fn from(value: AtomicBitFieldVec<W, Box<[W::AtomicType]>>) -> Self {
1✔
1370
        BitFieldVec {
1371
            bits: unsafe { transmute_boxed_slice::<W::AtomicType, W>(value.bits) },
3✔
1372

1373
            len: value.len,
2✔
1374
            bit_width: value.bit_width,
1✔
1375
            mask: value.mask,
1✔
1376
        }
1377
    }
1378
}
1379

1380
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a [W::AtomicType]>>
1381
    for BitFieldVec<W, &'a [W]>
1382
{
1383
    #[inline]
1384
    fn from(value: AtomicBitFieldVec<W, &'a [W::AtomicType]>) -> Self {
1✔
1385
        BitFieldVec {
1386
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
2✔
1387
            len: value.len,
2✔
1388
            bit_width: value.bit_width,
1✔
1389
            mask: value.mask,
1✔
1390
        }
1391
    }
1392
}
1393

1394
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a mut [W::AtomicType]>>
1395
    for BitFieldVec<W, &'a mut [W]>
1396
{
1397
    #[inline]
1398
    fn from(value: AtomicBitFieldVec<W, &'a mut [W::AtomicType]>) -> Self {
1✔
1399
        BitFieldVec {
1400
            bits: unsafe {
1✔
1401
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
1402
            },
1403
            len: value.len,
2✔
1404
            bit_width: value.bit_width,
1✔
1405
            mask: value.mask,
1✔
1406
        }
1407
    }
1408
}
1409

1410
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Vec<W>>>
1411
    for AtomicBitFieldVec<W, Vec<W::AtomicType>>
1412
{
1413
    #[inline]
1414
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
369✔
1415
        AtomicBitFieldVec {
1416
            bits: unsafe { transmute_vec::<W, W::AtomicType>(value.bits) },
1,107✔
1417
            len: value.len,
738✔
1418
            bit_width: value.bit_width,
369✔
1419
            mask: value.mask,
369✔
1420
        }
1421
    }
1422
}
1423

1424
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Box<[W]>>>
1425
    for AtomicBitFieldVec<W, Box<[W::AtomicType]>>
1426
{
1427
    #[inline]
1428
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1429
        AtomicBitFieldVec {
1430
            bits: unsafe { transmute_boxed_slice::<W, W::AtomicType>(value.bits) },
3✔
1431
            len: value.len,
2✔
1432
            bit_width: value.bit_width,
1✔
1433
            mask: value.mask,
1✔
1434
        }
1435
    }
1436
}
1437

1438
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a [W]>>
1439
    for AtomicBitFieldVec<W, &'a [W::AtomicType]>
1440
{
1441
    #[inline]
1442
    fn from(value: BitFieldVec<W, &'a [W]>) -> Self {
1✔
1443
        AtomicBitFieldVec {
1444
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
2✔
1445
            len: value.len,
2✔
1446
            bit_width: value.bit_width,
1✔
1447
            mask: value.mask,
1✔
1448
        }
1449
    }
1450
}
1451

1452
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a mut [W]>>
1453
    for AtomicBitFieldVec<W, &'a mut [W::AtomicType]>
1454
{
1455
    #[inline]
1456
    fn from(value: BitFieldVec<W, &'a mut [W]>) -> Self {
1✔
1457
        AtomicBitFieldVec {
1458
            bits: unsafe {
1✔
1459
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
1460
            },
1461
            len: value.len,
2✔
1462
            bit_width: value.bit_width,
1✔
1463
            mask: value.mask,
1✔
1464
        }
1465
    }
1466
}
1467

1468
impl<W: Word> From<BitFieldVec<W, Vec<W>>> for BitFieldVec<W, Box<[W]>> {
1469
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
96✔
1470
        BitFieldVec {
1471
            bits: value.bits.into_boxed_slice(),
288✔
1472
            len: value.len,
192✔
1473
            bit_width: value.bit_width,
96✔
1474
            mask: value.mask,
96✔
1475
        }
1476
    }
1477
}
1478

1479
impl<W: Word> From<BitFieldVec<W, Box<[W]>>> for BitFieldVec<W, Vec<W>> {
1480
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1481
        BitFieldVec {
1482
            bits: value.bits.into_vec(),
3✔
1483
            len: value.len,
2✔
1484
            bit_width: value.bit_width,
1✔
1485
            mask: value.mask,
1✔
1486
        }
1487
    }
1488
}
1489

1490
#[cfg(test)]
1491
mod tests {
1492
    use super::*;
1493

1494
    #[test]
1495
    fn test_with_capacity() {
1496
        let mut b = BitFieldVec::<usize, _>::with_capacity(10, 100);
1497
        let capacity = b.bits.capacity();
1498
        for _ in 0..100 {
1499
            b.push(0);
1500
        }
1501
        assert_eq!(b.bits.capacity(), capacity);
1502
    }
1503

1504
    fn copy<W: Word, B: AsRef<[W]>, C: AsRef<[W]> + AsMut<[W]>>(
1505
        source: &BitFieldVec<W, B>,
1506
        from: usize,
1507
        dest: &mut BitFieldVec<W, C>,
1508
        to: usize,
1509
        len: usize,
1510
    ) {
1511
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
1512
        for i in 0..len {
1513
            dest.set(to + i, source.get(from + i));
1514
        }
1515
    }
1516

1517
    #[test]
1518
    fn test_copy() {
1519
        // Exhaustively exercise the optimized `BitFieldVec::copy` against the
        // naive element-by-element `copy` reference implementation, for every
        // pair of 3-bit source/destination fill patterns (0..8 each). Each
        // case below is aimed at a distinct branch of the word-level copy:
        // same-word vs. cross-word source/destination spans, and equal /
        // smaller / greater in-word bit offsets, with both a short residual
        // and a full-word (64-bit) residual.
        for src_pattern in 0..8 {
            for dst_pattern in 0..8 {
                // if from_first_word == from_last_word && to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 2, 10);
                copy(&source, 1, &mut dest_expected, 2, 10);
                assert_eq!(dest_actual, dest_expected);

                // else if from_first_word == from_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 20, 10);
                copy(&source, 1, &mut dest_expected, 20, 10);
                assert_eq!(dest_actual, dest_expected);

                // else if to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(20, &mut dest_actual, 1, 10);
                copy(&source, 20, &mut dest_expected, 1, 10);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit == dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 40);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 40);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit == dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 61);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 61);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit < dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit < dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40 + 17);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40 + 17);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit > dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 64);
                assert_eq!(dest_actual, dest_expected);

                // else if src_bit > dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 21 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 21 + 64);
                assert_eq!(dest_actual, dest_expected);
            }
        }
1586
    }
1587
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc