• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vigna / sux-rs / 14146200738

29 Mar 2025 01:41PM UTC coverage: 78.607% (-0.02%) from 78.626%
14146200738

push

github

vigna
Reduced vertex size

4119 of 5240 relevant lines covered (78.61%)

125617525.32 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

88.47
/src/bits/bit_field_vec.rs
1
/*
2
 * SPDX-FileCopyrightText: 2023 Inria
3
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
4
 *
5
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
6
 */
7

8
//! Vectors of values of fixed bit width.
9
//!
10
//! Elements are stored contiguously, with no padding bits (in particular,
11
//! unless the bit width is a power of two some elements will be stored across
12
//! word boundaries).
13
//!
14
//! There are two flavors: [`BitFieldVec`], a mutable bit-field vector, and
15
//! [`AtomicBitFieldVec`], a mutable, thread-safe bit-field vector.
16
//!
17
//! These flavors depend on a backend, and presently we provide, given an
18
//! unsigned integer type `W` or an unsigned atomic integer type `A`:
19
//!
20
//! - `BitFieldVec<Vec<T>>`: a mutable, growable and resizable bit-field vector;
21
//! - `BitFieldVec<AsRef<[W]>>`: an immutable bit-field vector, useful for
22
//!   [ε-serde](epserde) support;
23
//! - `BitFieldVec<AsRef<[W]> + AsMut<[W]>>`: a mutable (but not resizable) bit-field
24
//!   vector;
25
//! - `AtomicBitFieldVec<AsRef<[A]>>`: a partially thread-safe, mutable (but not
26
//!   resizable) bit-field vector.
27
//!
28
//! More generally, the underlying type must satisfy the trait [`Word`] for
29
//! [`BitFieldVec`] and additionally [`IntoAtomic`] for [`AtomicBitFieldVec`].
30
//! A blanket implementation exposes slices of elements of type `W` as bit-field
31
//! vectors of width `W::BITS`, analogously for atomic types `A`.
32
//!
33
//! Note that nothing is assumed about the content of the backend outside the
34
//! bits of the vector. Moreover, the content of the backend outside of the
35
//! vector is never modified by the methods of this structure.
36
//!
37
//! For high-speed unchecked scanning, we implement [`IntoUncheckedIterator`]
38
//! and [`IntoReverseUncheckedIterator`] on a reference to this type. The are
39
//! used, for example, to provide
40
//! [predecessor](crate::traits::indexed_dict::Pred) and
41
//! [successor](crate::traits::indexed_dict::Succ) primitives for
42
//! [`EliasFano`].
43
//!
44
//! # Low-level support
45
//!
46
//! The methods [`addr_of`](BitFieldVec::addr_of) and
47
//! [`get_unaligned`](BitFieldVec::get_unaligned) can be used to manually
48
//! prefetch parts of the data structure, or read values using unaligned read,
49
//! when the bit width makes it possible.
50
//!
51
//! # Examples
52
//! ```rust
53
//! use sux::prelude::*;
54
//!
55
//! // Bit field vector of bit width 5 and length 10, all entries set to zero
56
//! let mut b = <BitFieldVec<usize>>::new(5, 10);
57
//! assert_eq!(b.len(), 10);
58
//! assert_eq!(b.bit_width(), 5);
59
//! b.set(0, 3);
60
//! assert_eq!(b.get(0), 3);
61
//!
62
//! // Empty bit field vector of bit width 20 with capacity 10
63
//! let mut b = <BitFieldVec<usize>>::with_capacity(20, 10);
64
//! assert_eq!(b.len(), 0);
65
//! assert_eq!(b.bit_width(), 20);
66
//! b.push(20);
67
//! assert_eq!(b.len(), 1);
68
//! assert_eq!(b.get(0), 20);
69
//! assert_eq!(b.pop(), Some(20));
70
//!
71
//! // Convenience macro
72
//! let b = bit_field_vec![10; 4, 500, 2, 0, 1];
73
//! assert_eq!(b.len(), 5);
74
//! assert_eq!(b.bit_width(), 10);
75
//! assert_eq!(b.get(0), 4);
76
//! assert_eq!(b.get(1), 500);
77
//! assert_eq!(b.get(2), 2);
78
//! assert_eq!(b.get(3), 0);
79
//! assert_eq!(b.get(4), 1);
80
//! ```
81

82
use crate::prelude::*;
83
use crate::traits::bit_field_slice::{panic_if_out_of_bounds, panic_if_value};
84
use crate::utils::{transmute_boxed_slice, transmute_vec};
85
#[cfg(feature = "rayon")]
86
use crate::RAYON_MIN_LEN;
87
use anyhow::{bail, Result};
88
use common_traits::*;
89
use epserde::*;
90
use mem_dbg::*;
91
#[cfg(feature = "rayon")]
92
use rayon::prelude::*;
93
use std::sync::atomic::*;
94

95
/// Convenient, [`vec!`]-like macro to initialize `usize`-based bit-field
96
/// vectors.
97
///
98
/// Note that the syntax `bit_field_vec![width; length; value]` has been
99
/// deprecated in favor of `bit_field_vec![width => value; length]`, so that
100
/// value and length are in the same order as in [`vec!`].
101
///
102
/// # Examples
103
///
104
/// ```rust
105
/// use sux::prelude::*;
106
///
107
/// // Empty bit field vector of bit width 5
108
/// let b = bit_field_vec![5];
109
/// assert_eq!(b.len(), 0);
110
/// assert_eq!(b.bit_width(), 5);
111
///
112
/// // 10 values of bit width 6, all set to 3
113
/// let b = bit_field_vec![6 => 3; 10];
114
/// assert_eq!(b.len(), 10);
115
/// assert_eq!(b.bit_width(), 6);
116
/// assert_eq!(b.iter().all(|x| x == 3), true);
117
///
118
/// // List of values of bit width 10
119
/// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
120
/// assert_eq!(b.len(), 5);
121
/// assert_eq!(b.bit_width(), 10);
122
/// assert_eq!(b.get(0), 4);
123
/// assert_eq!(b.get(1), 500);
124
/// assert_eq!(b.get(2), 2);
125
/// assert_eq!(b.get(3), 0);
126
/// assert_eq!(b.get(4), 1);
127
/// ```
128
#[macro_export]
macro_rules! bit_field_vec {
    // `bit_field_vec![w]`: empty vector of bit width `w`.
    ($w:expr) => {
        $crate::bits::BitFieldVec::<usize, _>::new($w, 0)
    };
    // `bit_field_vec![w; n; v]`: `n` values of width `w`, all set to `v`.
    // Deprecated form; kept for backward compatibility (see doc comment above).
    ($w:expr; $n:expr; $v:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![w => v; n]`: `n` values of width `w`, all set to `v`,
    // with value and length in the same order as in `vec!`.
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![w; a, b, c, ...]`: explicit list of values of width `w`.
    // The throwaway array literal is only used to count the arguments for the
    // initial capacity.
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, [$($x),+].len());
            $(
                // Force type
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}
163

164
/// A vector of bit fields of fixed width.
///
/// Invariant: `mask` always has exactly the lowest `bit_width` bits set, and
/// `len * bit_width` never exceeds the number of bits in `bits`.
#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
pub struct BitFieldVec<W: Word = usize, B = Vec<W>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}
176

177
fn mask<W: Word>(bit_width: usize) -> W {
10,861✔
178
    if bit_width == 0 {
10,861✔
179
        W::ZERO
192✔
180
    } else {
181
        W::MAX >> (W::BITS - bit_width)
10,669✔
182
    }
183
}
184

185
impl<W: Word, B> BitFieldVec<W, B> {
    /// Builds a bit-field vector directly from its constituent parts; the
    /// mask is recomputed from `bit_width`.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Consumes the vector and returns its backend, bit width, and length,
    /// in an order suitable for [`Self::from_raw_parts`].
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    #[inline(always)]
    /// Maps the backend of the bit field, consuming `self` and returning a
    /// vector with the transformed backend (and possibly a different word
    /// type `W2`); the bit width and length are preserved and the mask is
    /// recomputed for `W2`.
    ///
    /// # Safety
    /// This is unsafe because it's the caller's responsibility to ensure
    /// that the length is compatible with the modified bits.
    pub unsafe fn map<W2: Word, B2>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<W2, B2> {
        BitFieldVec {
            bits: f(self.bits),
            bit_width: self.bit_width,
            mask: mask(self.bit_width),
            len: self.len,
        }
    }
}
219

220
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Gets the address of the item storing (the first part of)
    /// the element of given index.
    ///
    /// This method is mainly useful for manually prefetching
    /// parts of the data structure.
    pub fn addr_of(&self, index: usize) -> *const W {
        let start_bit = index * self.bit_width;
        let word_index = start_bit / W::BITS;
        // Indexing (rather than `get_unchecked`) keeps this safe: an
        // out-of-range index panics here.
        (&self.bits.as_ref()[word_index]) as *const _
    }

    /// Like [`BitFieldSlice::get`], but using unaligned reads.
    ///
    /// This method can be used for bit width smaller than or equal to `W::BITS
    /// - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`.
    pub fn get_unaligned(&self, index: usize) -> W {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.get_unaligned_unchecked(index) }
    }

    /// Like [`BitFieldSlice::get`], but using unaligned reads.
    ///
    /// # Safety
    ///
    /// This method can be used for bit width smaller than or equal
    /// to `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`.
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> W {
        // The width restriction guarantees that the whole value fits in one
        // unaligned `W`-sized read starting at the containing byte.
        debug_assert!(
            self.bit_width <= W::BITS - 8 + 2
                || self.bit_width == W::BITS - 8 + 4
                || self.bit_width == W::BITS
        );
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
        let start_bit = index * self.bit_width;
        // NOTE(review): the byte offset of bit `start_bit` would normally be
        // `start_bit / 8`; the divisor used here is `W::BYTES`, which equals 8
        // only for 64-bit `W` — confirm this is intended for narrower words.
        let ptr = base_ptr.add(start_bit / W::BYTES) as *const W;
        let word = core::ptr::read_unaligned(ptr);
        // The shift uses the remainder modulo 8, consistent with a byte offset.
        (word >> (start_bit % 8)) & self.mask
    }

    /// Returns the backend of the vector as a slice of `W`.
    pub fn as_slice(&self) -> &[W] {
        self.bits.as_ref()
    }
}
265
/// An iterator over non-overlapping chunks of a bit-field vector, starting at
/// the beginning of the vector.
///
/// When the vector len is not evenly divided by the chunk size, the last chunk
/// of the iteration will be shorter.
///
/// This struct is created by the
/// [`try_chunks_mut`](#impl-BitFieldSliceMut-for-BitFieldVec) method.
pub struct ChunksMut<'a, W: Word> {
    /// Number of elements not yet yielded.
    remaining: usize,
    /// Bit width of the elements of the underlying vector.
    bit_width: usize,
    /// Number of elements per chunk (the final chunk may hold fewer).
    chunk_size: usize,
    /// Iterator over the backend words, pre-partitioned into chunk-sized runs.
    iter: std::slice::ChunksMut<'a, W>,
}
279

280
impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
281
    type Item = BitFieldVec<W, &'a mut [W]>;
282

283
    #[inline]
284
    fn next(&mut self) -> Option<Self::Item> {
214✔
285
        self.iter.next().map(|chunk| {
427✔
286
            let size = Ord::min(self.chunk_size, self.remaining);
213✔
287
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
213✔
288
            self.remaining -= size;
213✔
289
            next
213✔
290
        })
291
    }
292
}
293

294
// NOTE(review): empty `impl` block — appears to be a leftover; consider removing.
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {}
295

296
impl<W: Word> BitFieldVec<W, Vec<W>> {
    /// Creates a new zero-initialized vector of given bit width and length.
    pub fn new(bit_width: usize, len: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates an empty vector that doesn't need to reallocate for up to
    /// `capacity` elements.
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }

    /// Sets the length.
    ///
    /// # Safety
    ///
    /// `len * bit_width` must be at most `self.bits.len() * W::BITS`. Note that
    /// setting the length might result in reading uninitialized data.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS);
        self.len = len;
    }

    /// Sets the length to 0, keeping the allocated backend.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns the bit width of the values inside the vector.
    pub fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }

    /// Creates a new vector by copying a slice; the bit width will be the minimum
    /// width sufficient to hold all values in the slice.
    ///
    /// Returns an error if the bit width of the values in `slice` is larger than
    /// `W::BITS`.
    pub fn from_slice<SW>(slice: &impl BitFieldSlice<SW>) -> Result<Self>
    where
        SW: Word + CastableInto<W>,
    {
        // `max_len` is the maximum bit length over all values in the slice,
        // i.e. the minimum bit width needed to represent every value.
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            max_len = Ord::max(max_len, unsafe { slice.get_unchecked(i).len() as usize });
        }

        if max_len > W::BITS {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            unsafe { result.set_unchecked(i, slice.get_unchecked(i).cast()) };
        }

        Ok(result)
    }

    /// Adds a value at the end of the vector, growing the backend by one
    /// word when the new element does not fit in the current allocation.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS {
            self.bits.push(W::ZERO);
        }
        unsafe {
            self.set_unchecked(self.len, value);
        }
        self.len += 1;
    }

    /// Truncates the vector, or extends it with `value`.
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            // Grow the backend first, then write the new elements.
            if new_len * self.bit_width > self.bits.len() * W::BITS {
                self.bits
                    .resize((new_len * self.bit_width).div_ceil(W::BITS), W::ZERO);
            }
            for i in self.len..new_len {
                unsafe {
                    self.set_unchecked(i, value);
                }
            }
        }
        // Truncation just shortens the logical length; backend bits past the
        // end are left untouched.
        self.len = new_len;
    }

    /// Removes and returns a value from the end of the vector.
    ///
    /// Returns None if the [`BitFieldVec`] is empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.get(self.len - 1);
        self.len -= 1;
        Some(value)
    }
}
420

421
impl<W: Word, T> BitFieldSliceCore<W> for BitFieldVec<W, T> {
    /// Returns the width in bits of each element of the vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        let width = self.bit_width;
        // An element can never be wider than the backing word.
        debug_assert!(width <= W::BITS);
        width
    }

    /// Returns the number of elements in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
432

433
impl<W: Word, B: AsRef<[W]>> BitFieldSlice<W> for BitFieldVec<W, B> {
    /// Reads the value of given index without bounds or width checks.
    unsafe fn get_unchecked(&self, index: usize) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // The value lies entirely within a single word.
            (*bits.get_unchecked(word_index) >> bit_index) & self.mask
        } else {
            // The value straddles two consecutive words: combine the high
            // bits of the first word with the low bits of the second.
            ((*bits.get_unchecked(word_index) >> bit_index)
                | (*bits.get_unchecked(word_index + 1) << (W::BITS - bit_index)))
                & self.mask
        }
    }
}
449

450
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldSliceMut<W> for BitFieldVec<W, B> {
    /// Returns the mask keeping the lowest `bit_width` bits.
    #[inline(always)]
    fn mask(&self) -> W {
        self.mask
    }

    /// Sets the element of given index to `value`, panicking on
    /// out-of-bounds indices or values wider than `bit_width`.
    #[inline(always)]
    fn set(&mut self, index: usize, value: W) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_unchecked(index, value);
        }
    }

    /// Sets the element of given index to `value` without bounds or width
    /// checks.
    unsafe fn set_unchecked(&mut self, index: usize, value: W) {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        if bit_index + self.bit_width <= W::BITS {
            // The value fits in a single word: clear the field, then OR in
            // the new value.
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= !(self.mask << bit_index);
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;
        } else {
            // The value straddles two words: write the low part into the
            // high bits of the first word...
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= (W::ONE << bit_index) - W::ONE;
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;

            // ...and the high part into the low bits of the second word.
            let mut word = *bits.get_unchecked_mut(word_index + 1);
            word &= !(self.mask >> (W::BITS - bit_index));
            word |= value >> (W::BITS - bit_index);
            *bits.get_unchecked_mut(word_index + 1) = word;
        }
    }

    /// Zeroes all elements, leaving any backend bits beyond the end of the
    /// vector untouched.
    fn reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words].iter_mut().for_each(|x| *x = W::ZERO);
        if residual != 0 {
            // Clear only the low `residual` bits of the last used word.
            bits[full_words] &= W::MAX << residual;
        }
    }

    /// Parallel version of [`reset`](Self::reset) using Rayon.
    #[cfg(feature = "rayon")]
    fn par_reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| *x = W::ZERO);
        if residual != 0 {
            // Clear only the low `residual` bits of the last used word.
            bits[full_words] &= W::MAX << residual;
        }
    }

    /// This implementation performs the copy word by word, which is
    /// significantly faster than the default implementation.
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        // Reduce len to the elements available in both vectors
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let src_bit = src_pos % W::BITS;
        let dst_bit = dst_pos % W::BITS;
        let src_first_word = src_pos / W::BITS;
        let dst_first_word = dst_pos / W::BITS;
        let src_last_word = (src_pos + bit_len - 1) / W::BITS;
        let dst_last_word = (dst_pos + bit_len - 1) / W::BITS;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();

        // Six cases, depending on whether source and destination ranges fit
        // in one word and on the relative bit offsets.
        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            // Both ranges fit in a single word.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // dst_first_word != dst_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (W::BITS - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (W::BITS - dst_bit);
        } else if dst_first_word == dst_last_word {
            // src_first_word != src_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (W::BITS - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // src_first_word != src_last_word && dst_first_word != dst_last_word
            // Aligned offsets: the middle words can be copied verbatim.
            let mask = W::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;

            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);

            let residual =
                bit_len - (W::BITS - src_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word; each destination word combines bits of two
            // consecutive source words, shifted left.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;

            let mut word = source[src_first_word] >> (W::BITS - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (W::BITS - shift);
            }
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word && src_bit > dst_bit; as above, but shifting
            // right.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (W::BITS - shift);

            let mut word = source[src_first_word + 1] >> shift;

            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (W::BITS - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }

            word |= source[src_last_word] << (W::BITS - shift);

            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }

    /// This implementation keeps a buffer of `W::BITS` bits for reading and
    /// writing, obtaining a significant speedup with respect to the default
    /// implementation.
    #[inline]
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
    where
        F: FnMut(W) -> W,
        Self: BitFieldSlice<W>,
    {
        if self.is_empty() {
            return;
        }
        let bit_width = self.bit_width();
        if bit_width == 0 {
            // Zero-width elements are all zero; nothing to apply.
            return;
        }
        let mask = self.mask();
        let number_of_words: usize = self.bits.as_ref().len();
        let last_word_idx = number_of_words.saturating_sub(1);

        let mut write_buffer: W = W::ZERO;
        let mut read_buffer: W = *self.bits.as_ref().get_unchecked(0);

        // specialized case because it's much faster: when the bit width is a
        // power of two, no element straddles a word boundary.
        if bit_width.is_power_of_two() {
            let mut bits_in_buffer = 0;

            // TODO!: figure out how to simplify
            let mut buffer_limit = (self.len() * bit_width) % W::BITS;
            if buffer_limit == 0 {
                buffer_limit = W::BITS;
            }

            for read_idx in 1..number_of_words {
                // pre-load the next word so it loads while we parse the buffer
                let next_word: W = *self.bits.as_ref().get_unchecked(read_idx);

                // parse as much as we can from the buffer
                loop {
                    let next_bits_in_buffer = bits_in_buffer + bit_width;

                    if next_bits_in_buffer > W::BITS {
                        break;
                    }

                    let value = read_buffer & mask;
                    // throw away the bits we just read
                    read_buffer >>= bit_width;
                    // apply user func
                    let new_value = f(value);
                    // put the new value in the write buffer
                    write_buffer |= new_value << bits_in_buffer;

                    bits_in_buffer = next_bits_in_buffer;
                }

                invariant_eq!(read_buffer, W::ZERO);
                *self.bits.as_mut().get_unchecked_mut(read_idx - 1) = write_buffer;
                read_buffer = next_word;
                write_buffer = W::ZERO;
                bits_in_buffer = 0;
            }

            // write the last word if we have some bits left
            while bits_in_buffer < buffer_limit {
                let value = read_buffer & mask;
                // throw away the bits we just read
                read_buffer >>= bit_width;
                // apply user func
                let new_value = f(value);
                // put the new value in the write buffer
                write_buffer |= new_value << bits_in_buffer;
                // -= bit_width but with no casts
                bits_in_buffer += bit_width;
            }

            *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
            return;
        }

        // The position inside the word. In most parametrization of the
        // vector, since the bit_width is not necessarily a integer
        // divisor of the word size, we need to keep track of the position
        // inside the word. As we scroll through the bits, due to the bits
        // remainder, we may need to operate on two words at the same time.
        let mut global_bit_index: usize = 0;

        // The number of words in the bitvec.
        let mut lower_word_limit = 0;
        let mut upper_word_limit = W::BITS;

        // We iterate across the words
        for word_number in 0..last_word_idx {
            // We iterate across the elements in the word.
            while global_bit_index + bit_width <= upper_word_limit {
                // We retrieve the value from the current word.
                let offset = global_bit_index - lower_word_limit;
                global_bit_index += bit_width;
                let element = self.mask() & (read_buffer >> offset);

                // We apply the function to the element.
                let new_element = f(element);

                // We set the element in the new word.
                write_buffer |= new_element << offset;
            }

            // We retrieve the next word from the bitvec.
            let next_word = *self.bits.as_ref().get_unchecked(word_number + 1);

            let mut new_write_buffer = W::ZERO;
            if upper_word_limit != global_bit_index {
                let remainder = upper_word_limit - global_bit_index;
                let offset = global_bit_index - lower_word_limit;
                // We compose the element from the remaining elements in the
                // current word and the elements in the next word.
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask();
                global_bit_index += bit_width;

                // We apply the function to the element.
                let new_element = f(element);

                write_buffer |= new_element << offset;

                new_write_buffer = new_element >> remainder;
            };

            read_buffer = next_word;

            *self.bits.as_mut().get_unchecked_mut(word_number) = write_buffer;

            write_buffer = new_write_buffer;
            lower_word_limit = upper_word_limit;
            upper_word_limit += W::BITS;
        }

        let mut offset = global_bit_index - lower_word_limit;

        // We iterate across the elements in the word.
        while offset < self.len() * bit_width - global_bit_index {
            // We retrieve the value from the current word.
            let element = self.mask() & (read_buffer >> offset);

            // We apply the function to the element.
            let new_element = f(element);

            // We set the element in the new word.
            write_buffer |= new_element << offset;
            offset += bit_width;
        }

        *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
    }

    type ChunksMut<'a>
        = ChunksMut<'a, W>
    where
        Self: 'a;

    /// # Errors
    ///
    /// This method will return an error if the chunk size multiplied by the
    /// [bit width](BitFieldSliceCore::bit_width) is not a multiple of
    /// `W::BITS` and more than one chunk must be returned.
    fn try_chunks_mut(&mut self, chunk_size: usize) -> Result<Self::ChunksMut<'_>, ()> {
        let len = self.len();
        let bit_width = self.bit_width();
        // Chunks are only well-defined when each one starts on a word
        // boundary (or there is at most one chunk).
        if len <= chunk_size || (chunk_size * bit_width) % W::BITS == 0 {
            Ok(ChunksMut {
                remaining: len,
                bit_width: self.bit_width,
                chunk_size,
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(W::BITS)]
                    .chunks_mut((chunk_size * bit_width).div_ceil(W::BITS)),
            })
        } else {
            Err(())
        }
    }

    /// Returns the backend of the vector as a mutable slice of `W`.
    fn as_mut_slice(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
}
807

808
impl<W: Word> core::iter::Extend<W> for BitFieldVec<W, Vec<W>> {
    /// Appends every value yielded by the iterator to the end of the vector.
    ///
    /// Each value is added via [`BitFieldVec::push`], so the same width
    /// check applies to every element.
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
        iter.into_iter().for_each(|value| self.push(value));
    }
}
816

817
/// Equality between bit-field vectors requires that the word is the same, the
/// bit width is the same, and the content is the same.
impl<W: Word, B: AsRef<[W]>, C: AsRef<[W]>> PartialEq<BitFieldVec<W, C>> for BitFieldVec<W, B> {
    fn eq(&self, other: &BitFieldVec<W, C>) -> bool {
        if self.bit_width() != other.bit_width() {
            return false;
        }
        if self.len() != other.len() {
            return false;
        }
        // Compare all fully used backend words...
        let bit_len = self.len() * self.bit_width();
        if self.bits.as_ref()[..bit_len / W::BITS] != other.bits.as_ref()[..bit_len / W::BITS] {
            return false;
        }

        // ...and then only the residual bits of the last, partially used
        // word, if any: the left shift discards the bits beyond the end of
        // the vector, whose content is unspecified.
        let residual = bit_len % W::BITS;
        residual == 0
            || (self.bits.as_ref()[bit_len / W::BITS] ^ other.bits.as_ref()[bit_len / W::BITS])
                << (W::BITS - residual)
                == W::ZERO
    }
}
839

840
impl Eq for BitFieldVec {}
841

842
// Support for unchecked iterators
843

844
/// An [`UncheckedIterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorUncheckedIterator<'a, W, B>
where
    W: Word,
{
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// The index of the backend word from which `window` was last filled.
    word_index: usize,
    /// A buffer holding the next bits to return, right-aligned.
    window: W,
    /// The number of not-yet-consumed bits in `window`.
    fill: usize,
}
855

856
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorUncheckedIterator<'a, W, B> {
    /// Creates an iterator positioned at the element of given index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (fill, word_index);
        let window = if index == vec.len() {
            // The iterator starts exhausted: no word will be read.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            let bit_offset = index * vec.bit_width;
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            // The upper W::BITS - bit_index bits of the first word belong to
            // elements at positions >= index.
            fill = W::BITS - bit_index;
            unsafe {
                // SAFETY: index has been check at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
885

886
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        if self.fill >= bit_width {
            // The whole value is already contained in the window.
            self.fill -= bit_width;
            let res = self.window & self.vec.mask;
            self.window >>= bit_width;
            return res;
        }

        // The value is split between the window (its lower bits) and the
        // next backend word (its upper bits): read the next word and
        // combine the two parts.
        let res = self.window;
        self.word_index += 1;
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
        let res = (res | (self.window << self.fill)) & self.vec.mask;
        // `used` bits of the new word complete the current value.
        let used = bit_width - self.fill;
        self.window >>= used;
        self.fill = W::BITS - used;
        res
    }
}
910

911
impl<'a, W: Word, B: AsRef<[W]>> IntoUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoUncheckedIter = BitFieldVectorUncheckedIterator<'a, W, B>;
    /// Returns an unchecked iterator starting at the element of given index.
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVectorUncheckedIterator::new(self, from)
    }
}
918

919
/// An [`UncheckedIterator`] moving backwards over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorReverseUncheckedIterator<'a, W: Word, B> {
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// The index of the backend word from which `window` was last filled.
    word_index: usize,
    /// A buffer holding the next bits to return; unlike in the forward
    /// iterator, the valid bits are left-aligned.
    window: W,
    /// The number of not-yet-consumed bits in `window`.
    fill: usize,
}
927

928
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorReverseUncheckedIterator<'a, W, B> {
    /// Creates a reverse iterator whose first returned value is the element
    /// of index `index - 1`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (word_index, fill);

        let window = if index == 0 {
            // The iterator starts exhausted: no word will be read.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            // We have to handle the case of zero bit width
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            // The lower bit_index + 1 bits of the word belong to elements at
            // positions < index; align them to the top of the window.
            fill = bit_index + 1;
            unsafe {
                // SAFETY: index has been check at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) << (W::BITS - fill)
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
959

960
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorReverseUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        if self.fill >= bit_width {
            // The whole value is already contained in the (left-aligned)
            // window: rotate it so the value lands in the lowest bits.
            self.fill -= bit_width;
            self.window = self.window.rotate_left(bit_width as u32);
            return self.window & self.vec.mask;
        }

        // The value is split between the window (its upper bits) and the
        // previous backend word (its lower bits): read the previous word
        // and combine the two parts.
        let mut res = self.window.rotate_left(self.fill as u32);
        self.word_index -= 1;
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
        // `used` bits of the previous word complete the current value.
        let used = bit_width - self.fill;
        res = ((res << used) | (self.window >> (W::BITS - used))) & self.vec.mask;
        self.window <<= used;
        self.fill = W::BITS - used;
        res
    }
}
983

984
impl<'a, W: Word, B: AsRef<[W]>> IntoReverseUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoRevUncheckedIter = BitFieldVectorReverseUncheckedIterator<'a, W, B>;

    /// Returns a reverse unchecked iterator starting from the end of the
    /// vector.
    fn into_rev_unchecked_iter(self) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, self.len())
    }

    /// Returns a reverse unchecked iterator whose first returned value is
    /// the element of index `from - 1`.
    fn into_rev_unchecked_iter_from(self, from: usize) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, from)
    }
}
996

997
/// An [`Iterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVecIterator<'a, W, B>
where
    W: Word,
{
    /// The underlying unchecked iterator doing the actual work.
    unchecked: BitFieldVectorUncheckedIterator<'a, W, B>,
    /// The index of the next element to return; used for bounds checking.
    index: usize,
}
1006

1007
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVecIterator<'a, W, B> {
    /// Creates an iterator starting at the element of given index.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector (the same
    /// check is performed again by `BitFieldVectorUncheckedIterator::new`).
    fn new(vec: &'a BitFieldVec<W, B>, from: usize) -> Self {
        if from > vec.len() {
            panic!("Start index out of bounds: {} > {}", from, vec.len());
        }
        Self {
            unchecked: BitFieldVectorUncheckedIterator::new(vec, from),
            index: from,
        }
    }
}
1018

1019
impl<W: Word, B: AsRef<[W]>> Iterator for BitFieldVecIterator<'_, W, B> {
    type Item = W;

    /// Returns the next value of the vector, or `None` once the end has
    /// been reached.
    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.unchecked.vec.len() {
            return None;
        }
        // SAFETY: index has just been checked.
        let value = unsafe { self.unchecked.next_unchecked() };
        self.index += 1;
        Some(value)
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The length is exact, so lower and upper bound coincide.
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}
1037

1038
impl<W: Word, B: AsRef<[W]>> ExactSizeIterator for BitFieldVecIterator<'_, W, B> {
    /// Returns the number of elements still to be returned.
    #[inline(always)]
    fn len(&self) -> usize {
        self.unchecked.vec.len() - self.index
    }
}
1044

1045
impl<'a, W: Word, B: AsRef<[W]>> IntoIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoIter = BitFieldVecIterator<'a, W, B>;

    /// Returns an iterator over all values of the vector.
    fn into_iter(self) -> Self::IntoIter {
        BitFieldVecIterator::new(self, 0)
    }
}
1053

1054
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Returns an iterator over the values of the vector, starting at the
    /// element of given index.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector.
    pub fn iter_from(&self, from: usize) -> BitFieldVecIterator<W, B> {
        BitFieldVecIterator::new(self, from)
    }

    /// Returns an iterator over all values of the vector.
    pub fn iter(&self) -> BitFieldVecIterator<W, B> {
        self.iter_from(0)
    }
}
1063
/// A tentatively thread-safe vector of bit fields of fixed width.
///
/// This implementation provides some concurrency guarantees, albeit not
/// full-fledged thread safety: more precisely, we can guarantee thread-safety
/// if the bit width is a power of two; otherwise, concurrent writes to values
/// that cross word boundaries might end up in different threads succeeding in
/// writing only part of a value. If the user can guarantee that no two threads
/// ever write to the same boundary-crossing value, then no race condition can
/// happen.
///
/// Note that the trait
/// [`AtomicHelper`](crate::traits::bit_field_slice::AtomicHelper) can be used
/// to provide a more convenient naming for some methods.

#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
pub struct AtomicBitFieldVec<W: Word + IntoAtomic = usize, B = Vec<<W as IntoAtomic>::AtomicType>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}
1088

1089
impl<W: Word + IntoAtomic, B> AtomicBitFieldVec<W, B> {
    /// Creates a vector from raw parts without validating them.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Consumes the vector, returning its backend, its bit width, and its
    /// length (the mask can be recomputed from the bit width).
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }
}
1114

1115
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B> {
    /// Returns the backend of the `AtomicBitFieldVec` as a slice of `A`, where `A` is the
    /// atomic variant of `W`.
    pub fn as_slice(&self) -> &[W::AtomicType] {
        self.bits.as_ref()
    }
}
1122

1123
impl<W: Word + IntoAtomic> AtomicBitFieldVec<W>
where
    W::AtomicType: AtomicUnsignedInt,
{
    /// Creates a zero-initialized vector of `len` values of width `bit_width`.
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<W> {
        // We allocate at least one word to avoid branches in the gets.
        // NOTE(review): the original comment said "at least two words", but
        // `Ord::max(1, …)` only guarantees one — confirm against the
        // unchecked getters.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        AtomicBitFieldVec::<W> {
            bits: (0..n_of_words)
                .map(|_| W::AtomicType::new(W::ZERO))
                .collect(),
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
}
1140

1141
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    /// Writes zeros in all values.
    #[deprecated(since = "0.4.4", note = "reset is deprecated in favor of reset_atomic")]
    pub fn reset(&mut self, ordering: Ordering) {
        // Kept as a thin forwarding shim for backward compatibility.
        self.reset_atomic(ordering)
    }
}
1151

1152
impl<W: Word + IntoAtomic, B> BitFieldSliceCore<W::AtomicType> for AtomicBitFieldVec<W, B> {
    /// Returns the width in bits of the values stored in the vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the number of values stored in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
1164

1165
impl<W: Word + IntoAtomic, T: AsRef<[W::AtomicType]>> AtomicBitFieldSlice<W>
    for AtomicBitFieldVec<W, T>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    #[inline]
    unsafe fn get_atomic_unchecked(&self, index: usize, order: Ordering) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // The value is entirely contained in a single word.
            (bits.get_unchecked(word_index).load(order) >> bit_index) & self.mask
        } else {
            // The value crosses a word boundary: combine the upper bits of
            // the first word with the lower bits of the second one. Note
            // that the two loads are not a single atomic operation.
            ((bits.get_unchecked(word_index).load(order) >> bit_index)
                | (bits.get_unchecked(word_index + 1).load(order) << (W::BITS - bit_index)))
                & self.mask
        }
    }

    // We reimplement set as we have the mask in the structure.

    /// Sets the element of the slice at the specified index.
    ///
    /// May panic if the index is not in [0..[len](`BitFieldSliceCore::len`))
    /// or the value does not fit in [`BitFieldSliceCore::bit_width`] bits.
    #[inline(always)]
    fn set_atomic(&self, index: usize, value: W, order: Ordering) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_atomic_unchecked(index, value, order);
        }
    }

    #[inline]
    unsafe fn set_atomic_unchecked(&self, index: usize, value: W, order: Ordering) {
        // NOTE(review): this assertion rules out full-width values even
        // though `get_atomic_unchecked` handles that case — confirm intent.
        debug_assert!(self.bit_width != W::BITS);
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // The value fits in one word: a single CAS loop suffices, so
            // this branch is consistent under concurrent access.
            let mut current = bits.get_unchecked(word_index).load(order);
            loop {
                let mut new = current;
                // Clear the target field, then write the new value into it.
                new &= !(self.mask << bit_index);
                new |= value << bit_index;

                match bits
                    .get_unchecked(word_index)
                    .compare_exchange(current, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => current = e,
                }
            }
        } else {
            // The value crosses a word boundary: two separate CAS loops are
            // needed, so concurrent writers may interleave (see the struct
            // documentation for the resulting guarantees).
            let mut word = bits.get_unchecked(word_index).load(order);
            // try to wait for the other thread to finish
            fence(Ordering::Acquire);
            loop {
                let mut new = word;
                // Keep the bits below the field; the field occupies all the
                // remaining upper bits of this word.
                new &= (W::ONE << bit_index) - W::ONE;
                new |= value << bit_index;

                match bits
                    .get_unchecked(word_index)
                    .compare_exchange(word, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => word = e,
                }
            }
            fence(Ordering::Release);

            // ensures that the compiler does not reorder the two atomic operations
            // this should increase the probability of having consistency
            // between two concurrent writes as they will both execute the set
            // of the bits in the same order, and the release / acquire fence
            // should try to synchronize the threads as much as possible
            compiler_fence(Ordering::SeqCst);

            let mut word = bits.get_unchecked(word_index + 1).load(order);
            fence(Ordering::Acquire);
            loop {
                let mut new = word;
                // Clear the low bits holding the upper part of the field and
                // write the upper part of the value into them.
                new &= !(self.mask >> (W::BITS - bit_index));
                new |= value >> (W::BITS - bit_index);

                match bits
                    .get_unchecked(word_index + 1)
                    .compare_exchange(word, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => word = e,
                }
            }
            fence(Ordering::Release);
        }
    }

    fn reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .iter()
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            // Zero only the residual bits of the last, partially used word,
            // leaving the bits beyond the end of the vector untouched.
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }

    #[cfg(feature = "rayon")]
    fn par_reset_atomic(&mut self, ordering: Ordering) {
        // Parallel version of `reset_atomic`: zeros full words in parallel,
        // then fixes the residual bits of the last word, if any.
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }
}
1298

1299
// Conversions
1300

1301
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Vec<W::AtomicType>>>
    for BitFieldVec<W, Vec<W>>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, Vec<W::AtomicType>>) -> Self {
        BitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type, so reinterpreting
            // the backend is sound; ownership of the vector makes the
            // contents no longer shared.
            bits: unsafe { transmute_vec::<W::AtomicType, W>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1314

1315
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Box<[W::AtomicType]>>>
    for BitFieldVec<W, Box<[W]>>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, Box<[W::AtomicType]>>) -> Self {
        BitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type.
            bits: unsafe { transmute_boxed_slice::<W::AtomicType, W>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1329

1330
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a [W::AtomicType]>>
    for BitFieldVec<W, &'a [W]>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, &'a [W::AtomicType]>) -> Self {
        BitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type, so the slice
            // reference can be reinterpreted in place.
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1343

1344
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a mut [W::AtomicType]>>
    for BitFieldVec<W, &'a mut [W]>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, &'a mut [W::AtomicType]>) -> Self {
        BitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type; exclusive access
            // is preserved by the mutable reference.
            bits: unsafe {
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
            },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1359

1360
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Vec<W>>>
    for AtomicBitFieldVec<W, Vec<W::AtomicType>>
{
    #[inline]
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
        AtomicBitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type, so the owned
            // backend can be reinterpreted as its atomic variant.
            bits: unsafe { transmute_vec::<W, W::AtomicType>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1373

1374
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Box<[W]>>>
    for AtomicBitFieldVec<W, Box<[W::AtomicType]>>
{
    #[inline]
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
        AtomicBitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type.
            bits: unsafe { transmute_boxed_slice::<W, W::AtomicType>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1387

1388
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a [W]>>
    for AtomicBitFieldVec<W, &'a [W::AtomicType]>
{
    #[inline]
    fn from(value: BitFieldVec<W, &'a [W]>) -> Self {
        AtomicBitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type, so the slice
            // reference can be reinterpreted in place.
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1401

1402
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a mut [W]>>
    for AtomicBitFieldVec<W, &'a mut [W::AtomicType]>
{
    #[inline]
    fn from(value: BitFieldVec<W, &'a mut [W]>) -> Self {
        AtomicBitFieldVec {
            // SAFETY: standard atomic integer types have the same size and
            // bit validity as the underlying integer type; exclusive access
            // is preserved by the mutable reference.
            bits: unsafe {
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
            },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1417

1418
impl<W: Word> From<BitFieldVec<W, Vec<W>>> for BitFieldVec<W, Box<[W]>> {
    /// Freezes the backend into a boxed slice, dropping any excess capacity.
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
        BitFieldVec {
            bits: value.bits.into_boxed_slice(),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1428

1429
impl<W: Word> From<BitFieldVec<W, Box<[W]>>> for BitFieldVec<W, Vec<W>> {
    /// Converts the boxed-slice backend into a vector without copying.
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
        BitFieldVec {
            bits: value.bits.into_vec(),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
1439

1440
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_with_capacity() {
        // Pushing up to the preallocated number of elements must not cause
        // a reallocation of the backend.
        let mut b = BitFieldVec::<usize, _>::with_capacity(10, 100);
        let capacity = b.bits.capacity();
        for _ in 0..100 {
            b.push(0);
        }
        assert_eq!(b.bits.capacity(), capacity);
    }

    /// Trivial element-by-element reference implementation of
    /// `BitFieldVec::copy`, used as ground truth in `test_copy`.
    fn copy<W: Word, B: AsRef<[W]>, C: AsRef<[W]> + AsMut<[W]>>(
        source: &BitFieldVec<W, B>,
        from: usize,
        dest: &mut BitFieldVec<W, C>,
        to: usize,
        len: usize,
    ) {
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
        for i in 0..len {
            dest.set(to + i, source.get(from + i));
        }
    }

    #[test]
    fn test_copy() {
        // Each case below targets a different branch of the word-based
        // implementation of `BitFieldVec::copy`; the comment before each
        // case names the branch being exercised.
        for src_pattern in 0..8 {
            for dst_pattern in 0..8 {
                // if from_first_word == from_last_word && to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 2, 10);
                copy(&source, 1, &mut dest_expected, 2, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if from_first_word == from_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 20, 10);
                copy(&source, 1, &mut dest_expected, 20, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(20, &mut dest_actual, 1, 10);
                copy(&source, 20, &mut dest_expected, 1, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit == dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 40);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 40);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit == dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 61);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 61);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit < dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit < dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40 + 17);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40 + 17);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit > dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 64);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit > dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 21 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 21 + 64);
                assert_eq!(dest_actual, dest_expected);
            }
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc