• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vigna / sux-rs / 18466861223

13 Oct 2025 01:09PM UTC coverage: 65.352% (+0.01%) from 65.341%
18466861223

push

github

vigna
Sparse partial array implementation

43 of 65 new or added lines in 2 files covered. (66.15%)

1 existing line in 1 file now uncovered.

3746 of 5732 relevant lines covered (65.35%)

116528486.3 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

66.03
/src/bits/bit_field_vec.rs
1
/*
2
 * SPDX-FileCopyrightText: 2023 Inria
3
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
4
 *
5
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
6
 */
7

8
//! Vectors of values of fixed bit width.
9
//!
10
//! Elements are stored contiguously, with no padding bits (in particular,
11
//! unless the bit width is a power of two some elements will be stored across
12
//! word boundaries).
13
//!
14
//! There are two flavors: [`BitFieldVec`], a mutable bit-field vector, and
15
//! [`AtomicBitFieldVec`], a mutable, thread-safe bit-field vector.
16
//!
17
//! These flavors depend on a backend, and presently we provide, given an
18
//! unsigned integer type `W` or an unsigned atomic integer type `A`:
19
//!
20
//! - `BitFieldVec<Vec<T>>`: a mutable, growable and resizable bit-field vector;
21
//! - `BitFieldVec<AsRef<[W]>>`: an immutable bit-field vector, useful for
22
//!   [ε-serde](epserde) support;
23
//! - `BitFieldVec<AsRef<[W]> + AsMut<[W]>>`: a mutable (but not resizable) bit
24
//!   vector;
25
//! - `AtomicBitFieldVec<AsRef<[A]>>`: a partially thread-safe, mutable (but not
26
//!   resizable) bit-field vector.
27
//!
28
//! More generally, the underlying type must satisfy the trait [`Word`] for
29
//! [`BitFieldVec`] and additionally [`IntoAtomic`] for [`AtomicBitFieldVec`].
30
//! A blanket implementation exposes slices of elements of type `W` as bit-field
31
//! vectors of width `W::BITS`, analogously for atomic types `A`.
32
//!
33
//! Note that nothing is assumed about the content of the backend outside the
34
//! bits of the vector. Moreover, the content of the backend outside of the
35
//! vector is never modified by the methods of this structure.
36
//!
37
//! For high-speed unchecked scanning, we implement [`IntoUncheckedIterator`]
38
//! and [`IntoReverseUncheckedIterator`] on a reference to this type. They are
39
//! used, for example, to provide
40
//! [predecessor](crate::traits::indexed_dict::Pred) and
41
//! [successor](crate::traits::indexed_dict::Succ) primitives for
42
//! [`EliasFano`].
43
//!
44
//! # Low-level support
45
//!
46
//! The methods [`addr_of`](BitFieldVec::addr_of) and
47
//! [`get_unaligned`](BitFieldVec::get_unaligned) can be used to manually
48
//! prefetch parts of the data structure, or read values using unaligned reads,
49
//! when the bit width makes it possible.
50
//!
51
//! # Examples
52
//! ```
53
//! # use sux::prelude::*;
54
//! # use bit_field_slice::*;
55
//! // Bit field vector of bit width 5 and length 10, all entries set to zero
56
//! let mut b = <BitFieldVec<usize>>::new(5, 10);
57
//! assert_eq!(b.len(), 10);
58
//! assert_eq!(b.bit_width(), 5);
59
//! b.set(0, 3);
60
//! assert_eq!(b.get(0), 3);
61
//!
62
//! // Empty bit field vector of bit width 20 with capacity 10
63
//! let mut b = <BitFieldVec<usize>>::with_capacity(20, 10);
64
//! assert_eq!(b.len(), 0);
65
//! assert_eq!(b.bit_width(), 20);
66
//! b.push(20);
67
//! assert_eq!(b.len(), 1);
68
//! assert_eq!(b.get(0), 20);
69
//! assert_eq!(b.pop(), Some(20));
70
//!
71
//! // Convenience macro
72
//! let b = bit_field_vec![10; 4, 500, 2, 0, 1];
73
//! assert_eq!(b.len(), 5);
74
//! assert_eq!(b.bit_width(), 10);
75
//! assert_eq!(b.get(0), 4);
76
//! assert_eq!(b.get(1), 500);
77
//! assert_eq!(b.get(2), 2);
78
//! assert_eq!(b.get(3), 0);
79
//! assert_eq!(b.get(4), 1);
80
//! ```
81

82
#[cfg(feature = "rayon")]
83
use crate::RAYON_MIN_LEN;
84
use crate::prelude::{bit_field_slice::*, *};
85
use crate::utils::{
86
    CannotCastToAtomicError, transmute_boxed_slice_from_atomic, transmute_boxed_slice_into_atomic,
87
    transmute_vec_from_atomic, transmute_vec_into_atomic,
88
};
89
use crate::{panic_if_out_of_bounds, panic_if_value};
90
use anyhow::{Result, bail};
91
use common_traits::{
92
    AsBytes, Atomic, AtomicInteger, AtomicUnsignedInt, CastableInto, IntoAtomic, invariant_eq,
93
};
94
use mem_dbg::*;
95
#[cfg(feature = "rayon")]
96
use rayon::prelude::*;
97
use std::sync::atomic::{Ordering, compiler_fence, fence};
98

99
/// Convenient, [`vec!`]-like macro to initialize `usize`-based bit-field
100
/// vectors.
101
///
102
/// Note that the syntax `bit_field_vec![width; length; value]` has been
/// deprecated in favor of `bit_field_vec![width => value; length]`, so that
/// value and length are in the same order as in [`vec!`].
105
///
106
/// # Examples
107
///
108
/// ```
109
/// # use sux::prelude::*;
110
/// # use bit_field_slice::*;
111
/// // Empty bit field vector of bit width 5
112
/// let b = bit_field_vec![5];
113
/// assert_eq!(b.len(), 0);
114
/// assert_eq!(b.bit_width(), 5);
115
///
116
/// // 10 values of bit width 6, all set to 3
117
/// let b = bit_field_vec![6 => 3; 10];
118
/// assert_eq!(b.len(), 10);
119
/// assert_eq!(b.bit_width(), 6);
120
/// assert_eq!(b.iter().all(|x| x == 3), true);
121
///
122
/// // List of values of bit width 10
123
/// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
124
/// assert_eq!(b.len(), 5);
125
/// assert_eq!(b.bit_width(), 10);
126
/// assert_eq!(b.get(0), 4);
127
/// assert_eq!(b.get(1), 500);
128
/// assert_eq!(b.get(2), 2);
129
/// assert_eq!(b.get(3), 0);
130
/// assert_eq!(b.get(4), 1);
131
/// ```
132
#[macro_export]
macro_rules! bit_field_vec {
    // `bit_field_vec![w]`: empty vector of bit width `w`.
    ($w:expr) => {
        $crate::bits::BitFieldVec::<usize, _>::new($w, 0)
    };
    // Deprecated form `bit_field_vec![w; n; v]`: `n` copies of `v`
    // (kept for backward compatibility; see the macro docs).
    ($w:expr; $n:expr; $v:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // Preferred form `bit_field_vec![w => v; n]`: `n` copies of `v`,
    // with value and length in the same order as `vec![v; n]`.
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![w; a, b, c, …]`: explicit list of values of bit width `w`.
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, [$($x),+].len());
            $(
                // Force type
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}
167

168
/// A vector of bit fields of fixed width.
#[derive(Debug, Clone, Copy, Hash, MemDbg, MemSize, value_traits::Subslices)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[value_traits_subslices(bound = "B: AsRef<[W]>")]
#[derive(value_traits::SubslicesMut)]
#[value_traits_subslices_mut(bound = "B: AsRef<[W]> + AsMut<[W]>")]
pub struct BitFieldVec<W: Word = usize, B = Vec<W>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    /// Invariant: always equal to `mask(bit_width)` (all constructors and
    /// `from_raw_parts`/`map` recompute it from `bit_width`).
    mask: W,
    /// The length of the vector.
    len: usize,
}
185

186
fn mask<W: Word>(bit_width: usize) -> W {
9,701✔
187
    if bit_width == 0 {
9,701✔
188
        W::ZERO
212✔
189
    } else {
190
        W::MAX >> W::BITS.checked_sub(bit_width).expect("bit_width > W::BITS")
9,489✔
191
    }
192
}
193

194
impl<W: Word, B> BitFieldVec<W, B> {
    /// Builds a bit-field vector from its raw components; the mask is
    /// recomputed from `bit_width`.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Decomposes the vector into its backend, bit width, and length.
    /// The mask is not returned, as it can be recomputed from the bit width.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    #[inline(always)]
    /// Modifies the bit field in place.
    ///
    /// # Safety
    /// This is unsafe because it's the caller's responsibility to ensure
    /// that the length is compatible with the modified bits.
    pub unsafe fn map<W2: Word, B2>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<W2, B2> {
        BitFieldVec {
            bits: f(self.bits),
            bit_width: self.bit_width,
            // Recompute the mask for the (possibly different) word type `W2`.
            mask: mask(self.bit_width),
            len: self.len,
        }
    }
}
228

229
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Gets the address of the item storing (the first part of)
    /// the element of given index.
    ///
    /// This method is mainly useful for manually prefetching
    /// parts of the data structure.
    pub fn addr_of(&self, index: usize) -> *const W {
        let start_bit = index * self.bit_width;
        let word_index = start_bit / W::BITS;
        (&self.bits.as_ref()[word_index]) as *const _
    }

    /// Like [`BitFieldSlice::get`], but using unaligned reads.
    ///
    /// This method can be used only for bit width smaller than or equal to
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
    /// an additional padding word must be present at the end of the vector.
    ///
    /// Note that to guarantee the absence of undefined behavior this method
    /// has to perform several tests. Consider using
    /// [`get_unaligned_unchecked`](Self::get_unaligned_unchecked) if you are
    /// sure that the constraints are respected.
    ///
    /// # Panics
    ///
    /// This method will panic if the constraints above are not respected.
    pub fn get_unaligned(&self, index: usize) -> W {
        // Admissible widths: after shifting by the in-byte offset (< 8 bits),
        // the field must still fit in a single unaligned read of `W`.
        assert!(
            self.bit_width <= W::BITS - 8 + 2
                || self.bit_width == W::BITS - 8 + 4
                || self.bit_width == W::BITS
        );
        panic_if_out_of_bounds!(index, self.len);
        // The whole `W`-sized unaligned read must stay inside the backend.
        // NOTE(review): dividing a bit offset by `W::BYTES` yields a byte
        // offset only when `W::BYTES == 8` (e.g. `u64`/`usize` on 64-bit);
        // for narrower `W` this check is stricter than necessary — confirm.
        assert!(
            (index * self.bit_width) / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES
        );
        unsafe { self.get_unaligned_unchecked(index) }
    }

    /// Like [`BitFieldSlice::get_unchecked`], but using unaligned reads.
    ///
    /// # Safety
    ///
    /// This method can be used only for bit width smaller than or equal to
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
    /// an additional padding word must be present at the end of the vector.
    ///
    /// # Panics
    ///
    /// This method will panic in debug mode if the safety constraints are not
    /// respected.
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> W {
        debug_assert!(
            self.bit_width <= W::BITS - 8 + 2
                || self.bit_width == W::BITS - 8 + 4
                || self.bit_width == W::BITS
        );
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
        let start_bit = index * self.bit_width;
        debug_assert!(start_bit / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES);
        // Read a full `W` starting at the byte containing the first bit of
        // the field, then shift by the residual in-byte offset and mask.
        let ptr = unsafe { base_ptr.add(start_bit / 8) } as *const W;
        let word = unsafe { core::ptr::read_unaligned(ptr) };
        (word >> (start_bit % 8)) & self.mask
    }

    /// Returns the backend of the vector as a slice of `W`.
    pub fn as_slice(&self) -> &[W] {
        self.bits.as_ref()
    }
}
299
/// An iterator over non-overlapping chunks of a bit-field vector, starting at
300
/// the beginning of the vector.
301
///
302
/// When the vector len is not evenly divided by the chunk size, the last chunk
303
/// of the iteration will be shorter.
304
///
305
/// This struct is created by the
306
/// [`try_chunks_mut`](crate::bits::bit_field_vec::BitFieldVec#impl-BitFieldSliceMut<W>-for-BitFieldVec<W,+B>)
307
/// method.
308
pub struct ChunksMut<'a, W: Word> {
    /// Number of elements not yet returned by the iterator.
    remaining: usize,
    /// Bit width of the elements of the underlying bit-field vector.
    bit_width: usize,
    /// Maximum number of elements per chunk.
    chunk_size: usize,
    /// Iterator over the backend words of each chunk.
    iter: std::slice::ChunksMut<'a, W>,
}
314

315
impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
316
    type Item = BitFieldVec<W, &'a mut [W]>;
317

318
    #[inline]
319
    fn next(&mut self) -> Option<Self::Item> {
90✔
320
        self.iter.next().map(|chunk| {
359✔
321
            let size = Ord::min(self.chunk_size, self.remaining);
356✔
322
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
445✔
323
            self.remaining -= size;
89✔
324
            next
89✔
325
        })
326
    }
327
}
328

329
// NOTE(review): this `impl` block is empty — it looks like a leftover and
// could be removed without affecting anything.
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {}
330

331
impl<W: Word> BitFieldVec<W, Vec<W>> {
    /// Creates a new zero-initialized vector of given bit width and length.
    pub fn new(bit_width: usize, len: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates a new zero-initialized vector of given bit width and length,
    /// adding padding bits to the end of the vector so that unaligned reads are
    /// possible.
    ///
    /// Note that this convenience method is a one-off: if the vector is resized
    /// or expanded, the padding will be lost.
    pub fn new_unaligned(bit_width: usize, len: usize) -> Self {
        let n_of_words = (len * bit_width).div_ceil(W::BITS);
        Self {
            // We add a word at the end
            bits: vec![W::ZERO; n_of_words + 1],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates an empty vector that doesn't need to reallocate for up to
    /// `capacity` elements.
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }

    /// Sets the length.
    ///
    /// # Safety
    ///
    /// `len * bit_width` must be at most `self.bits.len() * W::BITS`. Note that
    /// setting the length might result in reading uninitialized data.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS);
        self.len = len;
    }

    /// Sets the length to zero, leaving the underlying storage untouched.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns the bit width of the values inside the vector.
    pub fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }

    /// Creates a new vector by copying a slice; the bit width will be the minimum
    /// width sufficient to hold all values in the slice.
    ///
    /// Returns an error if the bit width of the values in `slice` is larger than
    /// `W::BITS`.
    pub fn from_slice<SW>(slice: &impl BitFieldSlice<SW>) -> Result<Self>
    where
        SW: Word + CastableInto<W>,
    {
        // `max_len` is the minimum bit width sufficient to hold every value.
        // NOTE(review): `len()` here appears to be the bit length of the
        // value (a `Word` method), not a container length — confirm.
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            max_len = Ord::max(max_len, unsafe { slice.get_unchecked(i).len() as usize });
        }

        if max_len > W::BITS {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            // SAFETY: `i` is within bounds of both slices by construction.
            unsafe { result.set_unchecked(i, slice.get_unchecked(i).cast()) };
        }

        Ok(result)
    }

    /// Adds a value at the end of the vector.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not fit the bit width of the vector.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        // Grow the backend by one word if the new element does not fit.
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS {
            self.bits.push(W::ZERO);
        }
        unsafe {
            // SAFETY: there is now room for the element at position `self.len`.
            self.set_unchecked(self.len, value);
        }
        self.len += 1;
    }

    /// Truncates or extend with `value` the vector.
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            if new_len * self.bit_width > self.bits.len() * W::BITS {
                self.bits
                    .resize((new_len * self.bit_width).div_ceil(W::BITS), W::ZERO);
            }
            for i in self.len..new_len {
                unsafe {
                    // SAFETY: the backend was just resized to hold `new_len`
                    // elements.
                    self.set_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }

    /// Removes and returns a value from the end of the vector.
    ///
    /// Returns None if the [`BitFieldVec`] is empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.get(self.len - 1);
        self.len -= 1;
        Some(value)
    }
}
472

473
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldVec<W, B> {
    /// A version of [`BitFieldSliceMut::set`] that returns the previous value.
    ///
    /// Note that as implemented this method never returns `Err`: invalid
    /// arguments panic instead (see below).
    ///
    /// # Panics
    /// - If `index` is out of bounds.
    /// - If `value` is not a valid value for the bit width of the vector.
    pub fn replace(&mut self, index: usize, value: W) -> Result<W> {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        // SAFETY: index and value have just been validated.
        Ok(unsafe { self.replace_unchecked(index, value) })
    }

    /// A version of [`BitFieldSliceMut::set_unchecked`] that returns the previous value,
    /// that doesn't check for out-of-bounds access or value validity.
    ///
    /// # Safety
    /// This method is unsafe because it does not check that `index` is within bounds
    pub unsafe fn replace_unchecked(&mut self, index: usize, value: W) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                // The field lies in a single word: extract the old value,
                // clear the field, and splice in the new value.
                let mut word = *bits.get_unchecked_mut(word_index);
                let old_value = (word >> bit_index) & self.mask;
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;
                old_value
            } else {
                // The field straddles two words: process the low part in the
                // first word...
                let mut word = *bits.get_unchecked_mut(word_index);
                let mut old_value = word >> bit_index;
                word &= (W::ONE << bit_index) - W::ONE;
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;

                // ...and the high part in the following word.
                let mut word = *bits.get_unchecked_mut(word_index + 1);
                old_value |= word << (W::BITS - bit_index);
                word &= !(self.mask >> (W::BITS - bit_index));
                word |= value >> (W::BITS - bit_index);
                *bits.get_unchecked_mut(word_index + 1) = word;
                old_value & self.mask
            }
        }
    }
}
521

522
impl<W: Word, T> BitFieldSliceCore<W> for BitFieldVec<W, T> {
    /// Returns the bit width of the values stored in the vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }
    /// Returns the number of values stored in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
533

534
impl<W: Word, B: AsRef<[W]>> BitFieldSlice<W> for BitFieldVec<W, B> {
    unsafe fn get_unchecked(&self, index: usize) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                // The field is contained in a single word.
                (*bits.get_unchecked(word_index) >> bit_index) & self.mask
            } else {
                // The field straddles two words: combine the high bits of the
                // first word with the low bits of the next one.
                ((*bits.get_unchecked(word_index) >> bit_index)
                    | (*bits.get_unchecked(word_index + 1) << (W::BITS - bit_index)))
                    & self.mask
            }
        }
    }
}
552

553
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldSliceMut<W> for BitFieldVec<W, B> {
554
    /// Returns the mask with the lowest `bit_width` bits set to one.
    #[inline(always)]
    fn mask(&self) -> W {
        self.mask
    }
558

559
    /// Sets the element of given index to `value`, panicking on an
    /// out-of-bounds index or an out-of-range value.
    #[inline(always)]
    fn set(&mut self, index: usize, value: W) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            // SAFETY: `index` has just been checked against `self.len`, and
            // `value` against the mask.
            self.set_unchecked(index, value);
        }
    }
567

568
    unsafe fn set_unchecked(&mut self, index: usize, value: W) {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                // The field lies in a single word: clear it, then store.
                let mut word = *bits.get_unchecked_mut(word_index);
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;
            } else {
                // The field straddles two words: write the low part in the
                // first word...
                let mut word = *bits.get_unchecked_mut(word_index);
                word &= (W::ONE << bit_index) - W::ONE;
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;

                // ...and the high part in the following word.
                let mut word = *bits.get_unchecked_mut(word_index + 1);
                word &= !(self.mask >> (W::BITS - bit_index));
                word |= value >> (W::BITS - bit_index);
                *bits.get_unchecked_mut(word_index + 1) = word;
            }
        }
    }
593

594
    fn reset(&mut self) {
1✔
595
        let bit_len = self.len * self.bit_width;
2✔
596
        let full_words = bit_len / W::BITS;
2✔
597
        let residual = bit_len % W::BITS;
2✔
598
        let bits = self.bits.as_mut();
3✔
599
        bits[..full_words].iter_mut().for_each(|x| *x = W::ZERO);
3✔
600
        if residual != 0 {
2✔
601
            bits[full_words] &= W::MAX << residual;
1✔
602
        }
603
    }
604

605
    #[cfg(feature = "rayon")]
606
    fn par_reset(&mut self) {
1✔
607
        let bit_len = self.len * self.bit_width;
2✔
608
        let full_words = bit_len / W::BITS;
2✔
609
        let residual = bit_len % W::BITS;
2✔
610
        let bits = self.bits.as_mut();
3✔
611
        bits[..full_words]
1✔
612
            .par_iter_mut()
613
            .with_min_len(RAYON_MIN_LEN)
1✔
614
            .for_each(|x| *x = W::ZERO);
1✔
615
        if residual != 0 {
2✔
616
            bits[full_words] &= W::MAX << residual;
1✔
617
        }
618
    }
619

620
    /// This implementation performs the copy word by word, which is
    /// significantly faster than the default implementation.
    ///
    /// Copies `len` elements starting at `from` in `self` into `dst` starting
    /// at `to`; `len` is silently reduced to what both vectors can provide.
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        // Reduce len to the elements available in both vectors
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        // Bit offsets of the first copied bit in source and destination.
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let src_bit = src_pos % W::BITS;
        let dst_bit = dst_pos % W::BITS;
        let src_first_word = src_pos / W::BITS;
        let dst_first_word = dst_pos / W::BITS;
        let src_last_word = (src_pos + bit_len - 1) / W::BITS;
        let dst_last_word = (dst_pos + bit_len - 1) / W::BITS;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();

        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            // Both source and destination span a single word.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // dst_first_word != dst_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (W::BITS - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (W::BITS - dst_bit);
        } else if dst_first_word == dst_last_word {
            // src_first_word != src_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (W::BITS - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // src_first_word != src_last_word && dst_first_word != dst_last_word
            // Aligned case: the interior words can be copied verbatim.
            let mask = W::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;

            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);

            // Bits remaining in the last destination word.
            let residual =
                bit_len - (W::BITS - src_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word
            // Each destination word combines the carry from the previous
            // source word with the current one, shifted left.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;

            let mut word = source[src_first_word] >> (W::BITS - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (W::BITS - shift);
            }
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word && src_bit > dst_bit
            // Each destination word combines the current source word shifted
            // right with the carry from the following one.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (W::BITS - shift);

            let mut word = source[src_first_word + 1] >> shift;

            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (W::BITS - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }

            word |= source[src_last_word] << (W::BITS - shift);

            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }
727

728
    /// This implementation keeps a buffer of `W::BITS` bits for reading and
    /// writing, obtaining a significant speedup with respect to the default
    /// implementation.
    ///
    /// Every stored value is read, passed through `f`, and written back in
    /// place. Two word-sized buffers are used: `read_buffer` holds the word
    /// currently being consumed and `write_buffer` accumulates the rewritten
    /// values, so each backend word is loaded and stored exactly once.
    #[inline]
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
    where
        F: FnMut(W) -> W,
        Self: BitFieldSlice<W>,
    {
        // Nothing to do on an empty vector.
        if self.is_empty() {
            return;
        }
        let bit_width = self.bit_width();
        // With zero-width fields every value is zero and f cannot change it.
        if bit_width == 0 {
            return;
        }
        let mask = self.mask();
        let number_of_words: usize = self.bits.as_ref().len();
        let last_word_idx = number_of_words.saturating_sub(1);

        let mut write_buffer: W = W::ZERO;
        let mut read_buffer: W = *unsafe { self.bits.as_ref().get_unchecked(0) };

        // Specialized case because it's much faster: when the bit width is a
        // power of two no value crosses a word boundary, so each word can be
        // processed independently.
        if bit_width.is_power_of_two() {
            let mut bits_in_buffer = 0;

            // Number of meaningful bits in the last word (W::BITS if the
            // values fill it exactly).
            // TODO!: figure out how to simplify
            let mut buffer_limit = (self.len() * bit_width) % W::BITS;
            if buffer_limit == 0 {
                buffer_limit = W::BITS;
            }

            for read_idx in 1..number_of_words {
                // pre-load the next word so it loads while we parse the buffer
                let next_word: W = *unsafe { self.bits.as_ref().get_unchecked(read_idx) };

                // parse as much as we can from the buffer
                loop {
                    let next_bits_in_buffer = bits_in_buffer + bit_width;

                    if next_bits_in_buffer > W::BITS {
                        break;
                    }

                    let value = read_buffer & mask;
                    // throw away the bits we just read
                    read_buffer >>= bit_width;
                    // apply user func
                    let new_value = f(value);
                    // put the new value in the write buffer
                    write_buffer |= new_value << bits_in_buffer;

                    bits_in_buffer = next_bits_in_buffer;
                }

                // Since bit_width divides W::BITS here, the word must have
                // been consumed entirely.
                invariant_eq!(read_buffer, W::ZERO);
                *unsafe { self.bits.as_mut().get_unchecked_mut(read_idx - 1) } = write_buffer;
                read_buffer = next_word;
                write_buffer = W::ZERO;
                bits_in_buffer = 0;
            }

            // write the last word if we have some bits left
            while bits_in_buffer < buffer_limit {
                let value = read_buffer & mask;
                // throw away the bits we just read
                read_buffer >>= bit_width;
                // apply user func
                let new_value = f(value);
                // put the new value in the write buffer
                write_buffer |= new_value << bits_in_buffer;
                // advance by one value (no casts needed)
                bits_in_buffer += bit_width;
            }

            *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
            return;
        }

        // The position inside the word. In most parametrizations of the
        // vector, since the bit_width is not necessarily an integer
        // divisor of the word size, we need to keep track of the position
        // inside the word. As we scroll through the bits, due to the bits
        // remainder, we may need to operate on two words at the same time.
        let mut global_bit_index: usize = 0;

        // Bit boundaries of the word currently being processed.
        let mut lower_word_limit = 0;
        let mut upper_word_limit = W::BITS;

        // We iterate across the words
        for word_number in 0..last_word_idx {
            // We iterate across the elements fully contained in the word.
            while global_bit_index + bit_width <= upper_word_limit {
                // We retrieve the value from the current word.
                let offset = global_bit_index - lower_word_limit;
                global_bit_index += bit_width;
                let element = self.mask() & (read_buffer >> offset);

                // We apply the function to the element.
                let new_element = f(element);

                // We set the element in the new word.
                write_buffer |= new_element << offset;
            }

            // We retrieve the next word from the bitvec.
            let next_word = *unsafe { self.bits.as_ref().get_unchecked(word_number + 1) };

            let mut new_write_buffer = W::ZERO;
            if upper_word_limit != global_bit_index {
                // A value straddles the word boundary.
                let remainder = upper_word_limit - global_bit_index;
                let offset = global_bit_index - lower_word_limit;
                // We compose the element from the remaining bits in the
                // current word and the bits in the next word.
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask();
                global_bit_index += bit_width;

                // We apply the function to the element.
                let new_element = f(element);

                // Low part goes in the current write buffer, high part
                // seeds the buffer for the next word.
                write_buffer |= new_element << offset;

                new_write_buffer = new_element >> remainder;
            };

            read_buffer = next_word;

            *unsafe { self.bits.as_mut().get_unchecked_mut(word_number) } = write_buffer;

            write_buffer = new_write_buffer;
            lower_word_limit = upper_word_limit;
            upper_word_limit += W::BITS;
        }

        let mut offset = global_bit_index - lower_word_limit;

        // We iterate across the elements left in the last word.
        while offset < self.len() * bit_width - global_bit_index {
            // We retrieve the value from the current word.
            let element = self.mask() & (read_buffer >> offset);

            // We apply the function to the element.
            let new_element = f(element);

            // We set the element in the new word.
            write_buffer |= new_element << offset;
            offset += bit_width;
        }

        *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
    }
881

882
    type ChunksMut<'a>
883
        = ChunksMut<'a, W>
884
    where
885
        Self: 'a;
886

887
    /// # Errors
888
    ///
889
    /// This method will return an error if the chunk size multiplied by the by
890
    /// the [bit width](BitFieldSliceCore::bit_width) is not a multiple of
891
    /// `W::BITS` and more than one chunk must be returned.
892
    fn try_chunks_mut(&mut self, chunk_size: usize) -> Result<Self::ChunksMut<'_>, ()> {
87✔
893
        let len = self.len();
261✔
894
        let bit_width = self.bit_width();
261✔
895
        if len <= chunk_size || (chunk_size * bit_width) % W::BITS == 0 {
92✔
896
            Ok(ChunksMut {
85✔
897
                remaining: len,
85✔
898
                bit_width: self.bit_width,
85✔
899
                chunk_size,
85✔
900
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(W::BITS)]
85✔
901
                    .chunks_mut((chunk_size * bit_width).div_ceil(W::BITS)),
85✔
902
            })
903
        } else {
904
            Err(())
2✔
905
        }
906
    }
907

908
    // Exposes the raw backing words for direct mutation.
    fn as_mut_slice(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
911
}
912

913
impl<W: Word> core::iter::Extend<W> for BitFieldVec<W, Vec<W>> {
    /// Adds the values returned by the iterator to the end of the vector,
    /// one at a time, growing it as needed.
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
        for value in iter {
            self.push(value);
        }
    }
}
921

922
/// Equality between bit-field vectors requires that the word is the same, the
/// bit width is the same, and the content is the same.
impl<W: Word, B: AsRef<[W]>, C: AsRef<[W]>> PartialEq<BitFieldVec<W, C>> for BitFieldVec<W, B> {
    fn eq(&self, other: &BitFieldVec<W, C>) -> bool {
        if self.bit_width() != other.bit_width() {
            return false;
        }
        if self.len() != other.len() {
            return false;
        }
        // Compare the fully used words directly.
        let bit_len = self.len() * self.bit_width();
        if self.bits.as_ref()[..bit_len / W::BITS] != other.bits.as_ref()[..bit_len / W::BITS] {
            return false;
        }

        // For a partially used last word, compare only its low `residual`
        // bits: XOR the words and shift out the unused high bits.
        let residual = bit_len % W::BITS;
        residual == 0
            || (self.bits.as_ref()[bit_len / W::BITS] ^ other.bits.as_ref()[bit_len / W::BITS])
                << (W::BITS - residual)
                == W::ZERO
    }
}
944

945
// Equality on the default parametrization is reflexive, so `Eq` holds.
impl Eq for BitFieldVec {}
946

947
// Support for unchecked iterators
948

949
/// An [`UncheckedIterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorUncheckedIterator<'a, W, B>
where
    W: Word,
{
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// Index of the backend word currently loaded in `window`.
    word_index: usize,
    /// Buffer holding the not-yet-consumed bits of the current word,
    /// shifted so the next value starts at bit 0.
    window: W,
    /// Number of valid bits left in `window`.
    fill: usize,
}
960

961
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorUncheckedIterator<'a, W, B> {
    /// Creates an iterator positioned on the value of index `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (fill, word_index);
        let window = if index == vec.len() {
            // Exhausted iterator: an empty window is sufficient.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            let bit_offset = index * vec.bit_width;
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            // All bits of the word from `bit_index` upwards are valid.
            fill = W::BITS - bit_index;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
990

991
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        // Fast path: the whole value is already in the window.
        if self.fill >= bit_width {
            self.fill -= bit_width;
            let res = self.window & self.vec.mask;
            self.window >>= bit_width;
            return res;
        }

        // The value straddles a word boundary: combine the leftover bits
        // with the low bits of the next backend word.
        let res = self.window;
        self.word_index += 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        let res = (res | (self.window << self.fill)) & self.vec.mask;
        // `used` bits of the new word complete the current value.
        let used = bit_width - self.fill;
        self.window >>= used;
        self.fill = W::BITS - used;
        res
    }
}
1015

1016
impl<'a, W: Word, B: AsRef<[W]>> IntoUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoUncheckedIter = BitFieldVectorUncheckedIterator<'a, W, B>;
    /// Returns an unchecked iterator positioned on the value of index `from`.
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVectorUncheckedIterator::new(self, from)
    }
}
1023

1024
/// An [`UncheckedIterator`] moving backwards over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorReverseUncheckedIterator<'a, W: Word, B> {
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// Index of the backend word currently loaded in `window`.
    word_index: usize,
    /// Buffer holding the not-yet-consumed bits of the current word,
    /// aligned so the next (i.e., previous in vector order) value ends at
    /// the top bit.
    window: W,
    /// Number of valid bits left in `window`.
    fill: usize,
}
1032

1033
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorReverseUncheckedIterator<'a, W, B> {
    /// Creates a reverse iterator whose first returned value is the one of
    /// index `index - 1`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (word_index, fill);

        let window = if index == 0 {
            // Exhausted iterator: an empty window is sufficient.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            // Bit offset of the last bit before the start position; the
            // saturating_sub handles the case of zero bit width.
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            // Bits 0..=bit_index of the word are still to be consumed.
            fill = bit_index + 1;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) << (W::BITS - fill)
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
1064

1065
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorReverseUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        // Fast path: the whole value is already in the window. Rotating
        // left brings the top `bit_width` bits down to the low positions.
        if self.fill >= bit_width {
            self.fill -= bit_width;
            self.window = self.window.rotate_left(bit_width as u32);
            return self.window & self.vec.mask;
        }

        // The value straddles a word boundary: take the leftover high bits
        // and complete them with the top bits of the previous backend word.
        let mut res = self.window.rotate_left(self.fill as u32);
        self.word_index -= 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        // `used` bits of the previous word complete the current value.
        let used = bit_width - self.fill;
        res = ((res << used) | (self.window >> (W::BITS - used))) & self.vec.mask;
        self.window <<= used;
        self.fill = W::BITS - used;
        res
    }
}
1088

1089
impl<'a, W: Word, B: AsRef<[W]>> IntoReverseUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoRevUncheckedIter = BitFieldVectorReverseUncheckedIterator<'a, W, B>;

    /// Returns a reverse unchecked iterator starting from the end of the
    /// vector.
    fn into_rev_unchecked_iter(self) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, self.len())
    }

    /// Returns a reverse unchecked iterator whose first returned value is
    /// the one of index `from - 1`.
    fn into_rev_unchecked_iter_from(self, from: usize) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, from)
    }
}
1101

1102
/// An [`Iterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVecIterator<'a, W, B>
where
    W: Word,
{
    /// Forward unchecked iterator doing the actual bit extraction.
    unchecked: BitFieldVectorUncheckedIterator<'a, W, B>,
    /// Indices still to be returned; used for bounds checks and size hints.
    range: core::ops::Range<usize>,
}
1111

1112
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVecIterator<'a, W, B> {
1113
    fn new(vec: &'a BitFieldVec<W, B>, from: usize) -> Self {
313,210✔
1114
        let len = vec.len();
939,630✔
1115
        if from > len {
313,210✔
1116
            panic!(
×
1117
                "Start index out of bounds: {} >
×
1118
            {}",
×
1119
                from, len
×
1120
            );
1121
        }
1122
        Self {
1123
            unchecked: BitFieldVectorUncheckedIterator::new(vec, from),
×
1124
            range: from..len,
×
1125
        }
1126
    }
1127
}
1128

1129
impl<W: Word, B: AsRef<[W]>> Iterator for BitFieldVecIterator<'_, W, B> {
    type Item = W;
    fn next(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }
        // SAFETY: index has just been checked.
        let res = unsafe { self.unchecked.next_unchecked() };
        self.range.start += 1;
        Some(res)
    }

    // The exact number of remaining elements is known.
    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.range.len(), Some(self.range.len()))
    }
}
1146

1147
impl<W: Word, B: AsRef<[W]>> ExactSizeIterator for BitFieldVecIterator<'_, W, B> {
    // Number of elements still to be returned.
    #[inline(always)]
    fn len(&self) -> usize {
        self.range.len()
    }
}
1153

1154
/// This implements iteration from the end, but its slower than the forward iteration
1155
/// as here we do a random access, while in the forward iterator we do a sequential access
1156
/// and we keep a buffer of `W::BITS` bits to speed up the iteration.
1157
///
1158
/// If needed we could also keep a buffer from the end, but the logic would be more complex
1159
/// and potentially slower.
1160
impl<W: Word, B: AsRef<[W]>> DoubleEndedIterator for BitFieldVecIterator<'_, W, B> {
1161
    fn next_back(&mut self) -> Option<Self::Item> {
×
1162
        if self.range.is_empty() {
×
1163
            return None;
×
1164
        }
1165
        // SAFETY: index has just been checked.
1166
        let res = unsafe { self.unchecked.next_unchecked() };
×
1167
        self.range.end -= 1;
×
1168
        Some(res)
×
1169
    }
1170
}
1171

1172
impl<'a, W: Word, B: AsRef<[W]>> IntoIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoIter = BitFieldVecIterator<'a, W, B>;

    /// Returns an iterator over all the values of the vector.
    fn into_iter(self) -> Self::IntoIter {
        BitFieldVecIterator::new(self, 0)
    }
}
1180

1181
impl<'a, W: Word, B: AsRef<[W]>> IntoIteratorFrom for &'a BitFieldVec<W, B> {
    type IntoIterFrom = BitFieldVecIterator<'a, W, B>;

    /// Returns an iterator over the values of the vector starting at index
    /// `from`.
    fn into_iter_from(self, from: usize) -> Self::IntoIter {
        BitFieldVecIterator::new(self, from)
    }
}
1188

1189
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Returns an iterator over the values of the vector starting at index
    /// `from`.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector.
    pub fn iter_from(&self, from: usize) -> BitFieldVecIterator<'_, W, B> {
        BitFieldVecIterator::new(self, from)
    }

    /// Returns an iterator over all the values of the vector.
    pub fn iter(&self) -> BitFieldVecIterator<'_, W, B> {
        self.iter_from(0)
    }
}
1198
/// A tentatively thread-safe vector of bit fields of fixed width.
///
/// This implementation provides some concurrency guarantees, albeit not
/// full-fledged thread safety: more precisely, we can guarantee thread-safety
/// if the bit width is a power of two; otherwise, concurrent writes to values
/// that cross word boundaries might end up in different threads succeeding in
/// writing only part of a value. If the user can guarantee that no two threads
/// ever write to the same boundary-crossing value, then no race condition can
/// happen.
///
/// Note that the trait [`AtomicHelper`] can be used to provide a more
/// convenient naming for some methods.
#[derive(Debug, Clone, Hash, MemDbg, MemSize)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
pub struct AtomicBitFieldVec<W: Word + IntoAtomic = usize, B = Vec<<W as IntoAtomic>::AtomicType>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}
1223

1224
impl<W: Word + IntoAtomic, B> AtomicBitFieldVec<W, B> {
    /// Builds a vector directly from its components, without any check.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Consumes the vector, returning its backend, bit width, and length.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }
}
1249

1250
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B> {
    /// Returns the backend of the `AtomicBitFieldVec` as a slice of `A`, where `A` is the
    /// atomic variant of `W`.
    pub fn as_slice(&self) -> &[W::AtomicType] {
        self.bits.as_ref()
    }
}
1257

1258
impl<W: Word + IntoAtomic> AtomicBitFieldVec<W>
1259
where
1260
    W::AtomicType: AtomicUnsignedInt,
1261
{
1262
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<W> {
491✔
1263
        // we need at least two words to avoid branches in the gets
1264
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
1,964✔
1265
        AtomicBitFieldVec::<W> {
1266
            bits: (0..n_of_words)
491✔
1267
                .map(|_| W::AtomicType::new(W::ZERO))
1268
                .collect(),
1269
            bit_width,
1270
            mask: mask(bit_width),
982✔
1271
            len,
1272
        }
1273
    }
1274
}
1275

1276
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    /// Writes zeros in all values.
    #[deprecated(since = "0.4.4", note = "reset is deprecated in favor of reset_atomic")]
    pub fn reset(&mut self, ordering: Ordering) {
        // Kept only for backward compatibility; forwards to the trait method.
        self.reset_atomic(ordering)
    }
}
1286

1287
impl<W: Word + IntoAtomic, B> BitFieldSliceCore<W::AtomicType> for AtomicBitFieldVec<W, B> {
    // The bit width of the stored values; cannot exceed the word size.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    // The number of values in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
1299

1300
impl<W: Word + IntoAtomic, T: AsRef<[W::AtomicType]>> AtomicBitFieldSlice<W>
    for AtomicBitFieldVec<W, T>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    #[inline]
    unsafe fn get_atomic_unchecked(&self, index: usize, order: Ordering) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                // The value is contained in a single word.
                (bits.get_unchecked(word_index).load(order) >> bit_index) & self.mask
            } else {
                // The value straddles two words: combine the high bits of
                // the first word with the low bits of the second.
                ((bits.get_unchecked(word_index).load(order) >> bit_index)
                    | (bits.get_unchecked(word_index + 1).load(order) << (W::BITS - bit_index)))
                    & self.mask
            }
        }
    }

    // We reimplement set as we have the mask in the structure.

    /// Sets the element of the slice at the specified index.
    ///
    /// May panic if the index is not in [0..[len](`BitFieldSliceCore::len`))
    /// or the value does not fit in [`BitFieldSliceCore::bit_width`] bits.
    #[inline(always)]
    fn set_atomic(&self, index: usize, value: W, order: Ordering) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_atomic_unchecked(index, value, order);
        }
    }

    #[inline]
    unsafe fn set_atomic_unchecked(&self, index: usize, value: W, order: Ordering) {
        unsafe {
            // NOTE(review): this assert rejects full-word bit widths in
            // debug builds — presumably that case is handled elsewhere;
            // confirm against the callers.
            debug_assert!(self.bit_width != W::BITS);
            let pos = index * self.bit_width;
            let word_index = pos / W::BITS;
            let bit_index = pos % W::BITS;
            let bits = self.bits.as_ref();

            if bit_index + self.bit_width <= W::BITS {
                // Single-word case: a CAS loop makes the update consistent.
                let mut current = bits.get_unchecked(word_index).load(order);
                loop {
                    let mut new = current;
                    // Clear the field, then write the new value.
                    new &= !(self.mask << bit_index);
                    new |= value << bit_index;

                    match bits
                        .get_unchecked(word_index)
                        .compare_exchange(current, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => current = e,
                    }
                }
            } else {
                // Cross-word case: the two words are updated with separate
                // CAS loops, so this is only best-effort consistent (see the
                // type-level documentation).
                let mut word = bits.get_unchecked(word_index).load(order);
                // try to wait for the other thread to finish
                fence(Ordering::Acquire);
                loop {
                    let mut new = word;
                    // Keep the bits below the field, then write the low part
                    // of the value in the top of the word.
                    new &= (W::ONE << bit_index) - W::ONE;
                    new |= value << bit_index;

                    match bits
                        .get_unchecked(word_index)
                        .compare_exchange(word, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => word = e,
                    }
                }
                fence(Ordering::Release);

                // ensures that the compiler does not reorder the two atomic operations
                // this should increase the probability of having consistency
                // between two concurrent writes as they will both execute the set
                // of the bits in the same order, and the release / acquire fence
                // should try to synchronize the threads as much as possible
                compiler_fence(Ordering::SeqCst);

                let mut word = bits.get_unchecked(word_index + 1).load(order);
                fence(Ordering::Acquire);
                loop {
                    let mut new = word;
                    // Clear the low part of the next word occupied by the
                    // field, then write the high part of the value.
                    new &= !(self.mask >> (W::BITS - bit_index));
                    new |= value >> (W::BITS - bit_index);

                    match bits
                        .get_unchecked(word_index + 1)
                        .compare_exchange(word, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => word = e,
                    }
                }
                fence(Ordering::Release);
            }
        }
    }

    // Zeros all values; only the bits actually used by the vector are
    // touched (the residual part of the last word is cleared with fetch_and).
    fn reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .iter()
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }

    // Parallel version of reset_atomic based on rayon.
    #[cfg(feature = "rayon")]
    fn par_reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }
}
1437

1438
// Conversions
1439

1440
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Vec<W::AtomicType>>>
1441
    for BitFieldVec<W, Vec<W>>
1442
{
1443
    #[inline]
1444
    fn from(value: AtomicBitFieldVec<W, Vec<W::AtomicType>>) -> Self {
470✔
1445
        BitFieldVec {
1446
            bits: transmute_vec_from_atomic(value.bits),
1,410✔
1447
            len: value.len,
940✔
1448
            bit_width: value.bit_width,
470✔
1449
            mask: value.mask,
470✔
1450
        }
1451
    }
1452
}
1453

1454
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Box<[W::AtomicType]>>>
1455
    for BitFieldVec<W, Box<[W]>>
1456
{
1457
    #[inline]
1458
    fn from(value: AtomicBitFieldVec<W, Box<[W::AtomicType]>>) -> Self {
1✔
1459
        BitFieldVec {
1460
            bits: transmute_boxed_slice_from_atomic(value.bits),
3✔
1461

1462
            len: value.len,
2✔
1463
            bit_width: value.bit_width,
1✔
1464
            mask: value.mask,
1✔
1465
        }
1466
    }
1467
}
1468

1469
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a [W::AtomicType]>>
1470
    for BitFieldVec<W, &'a [W]>
1471
{
1472
    #[inline]
1473
    fn from(value: AtomicBitFieldVec<W, &'a [W::AtomicType]>) -> Self {
1✔
1474
        BitFieldVec {
1475
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
2✔
1476
            len: value.len,
2✔
1477
            bit_width: value.bit_width,
1✔
1478
            mask: value.mask,
1✔
1479
        }
1480
    }
1481
}
1482

1483
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a mut [W::AtomicType]>>
1484
    for BitFieldVec<W, &'a mut [W]>
1485
{
1486
    #[inline]
1487
    fn from(value: AtomicBitFieldVec<W, &'a mut [W::AtomicType]>) -> Self {
1✔
1488
        BitFieldVec {
1489
            bits: unsafe {
1✔
1490
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
1491
            },
1492
            len: value.len,
2✔
1493
            bit_width: value.bit_width,
1✔
1494
            mask: value.mask,
1✔
1495
        }
1496
    }
1497
}
1498

1499
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Vec<W>>>
1500
    for AtomicBitFieldVec<W, Vec<W::AtomicType>>
1501
{
1502
    #[inline]
1503
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
369✔
1504
        AtomicBitFieldVec {
1505
            bits: transmute_vec_into_atomic(value.bits),
1,107✔
1506

1507
            len: value.len,
738✔
1508
            bit_width: value.bit_width,
369✔
1509
            mask: value.mask,
369✔
1510
        }
1511
    }
1512
}
1513

1514
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Box<[W]>>>
1515
    for AtomicBitFieldVec<W, Box<[W::AtomicType]>>
1516
{
1517
    #[inline]
1518
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1519
        AtomicBitFieldVec {
1520
            bits: transmute_boxed_slice_into_atomic(value.bits),
3✔
1521
            len: value.len,
2✔
1522
            bit_width: value.bit_width,
1✔
1523
            mask: value.mask,
1✔
1524
        }
1525
    }
1526
}
1527

1528
impl<'a, W: Word + IntoAtomic> TryFrom<BitFieldVec<W, &'a [W]>>
1529
    for AtomicBitFieldVec<W, &'a [W::AtomicType]>
1530
{
1531
    type Error = CannotCastToAtomicError<W>;
1532

1533
    #[inline]
1534
    fn try_from(value: BitFieldVec<W, &'a [W]>) -> Result<Self, Self::Error> {
1✔
1535
        if core::mem::align_of::<W::AtomicType>() != core::mem::align_of::<W>() {
1✔
1536
            return Err(CannotCastToAtomicError::default());
×
1537
        }
1538
        Ok(AtomicBitFieldVec {
×
1539
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
×
1540
            len: value.len,
×
1541
            bit_width: value.bit_width,
×
1542
            mask: value.mask,
×
1543
        })
1544
    }
1545
}
1546

1547
impl<'a, W: Word + IntoAtomic> TryFrom<BitFieldVec<W, &'a mut [W]>>
1548
    for AtomicBitFieldVec<W, &'a mut [W::AtomicType]>
1549
{
1550
    type Error = CannotCastToAtomicError<W>;
1551

1552
    #[inline]
1553
    fn try_from(value: BitFieldVec<W, &'a mut [W]>) -> Result<Self, Self::Error> {
1✔
1554
        if core::mem::align_of::<W::AtomicType>() != core::mem::align_of::<W>() {
1✔
1555
            return Err(CannotCastToAtomicError::default());
×
1556
        }
1557
        Ok(AtomicBitFieldVec {
×
1558
            bits: unsafe {
×
1559
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
×
1560
            },
1561
            len: value.len,
×
1562
            bit_width: value.bit_width,
×
1563
            mask: value.mask,
×
1564
        })
1565
    }
1566
}
1567

1568
impl<W: Word> From<BitFieldVec<W, Vec<W>>> for BitFieldVec<W, Box<[W]>> {
1569
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
126✔
1570
        BitFieldVec {
1571
            bits: value.bits.into_boxed_slice(),
378✔
1572
            len: value.len,
252✔
1573
            bit_width: value.bit_width,
126✔
1574
            mask: value.mask,
126✔
1575
        }
1576
    }
1577
}
1578

1579
impl<W: Word> From<BitFieldVec<W, Box<[W]>>> for BitFieldVec<W, Vec<W>> {
1580
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1581
        BitFieldVec {
1582
            bits: value.bits.into_vec(),
3✔
1583
            len: value.len,
2✔
1584
            bit_width: value.bit_width,
1✔
1585
            mask: value.mask,
1✔
1586
        }
1587
    }
1588
}
1589

1590
impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValue for BitFieldVec<W, B> {
1591
    type Value = W;
1592

1593
    fn len(&self) -> usize {
×
1594
        <Self as BitFieldSliceCore<W>>::len(self)
×
1595
    }
1596
}
1597

1598
impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValueGet for BitFieldVec<W, B> {
1599
    unsafe fn get_value_unchecked(&self, index: usize) -> Self::Value {
×
1600
        unsafe { <Self as BitFieldSlice<W>>::get_unchecked(self, index) }
×
1601
    }
1602
}
1603

1604
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueSet
1605
    for BitFieldVec<W, B>
1606
{
1607
    unsafe fn set_value_unchecked(&mut self, index: usize, value: Self::Value) {
×
1608
        unsafe { <Self as BitFieldSliceMut<W>>::set_unchecked(self, index, value) };
×
1609
    }
1610
}
1611

1612
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueRepl
    for BitFieldVec<W, B>
{
    /// Replaces the element at `index` with `value`, delegating to
    /// `replace_unchecked` and returning its result.
    ///
    /// # Safety
    ///
    /// The caller must uphold the contract of `replace_unchecked`; in
    /// particular `index` must be within bounds.
    unsafe fn replace_value_unchecked(&mut self, index: usize, value: Self::Value) -> Self::Value {
        // SAFETY: the caller upholds the bounds contract.
        unsafe { self.replace_unchecked(index, value) }
    }
}
1619

1620
// GAT carrier for `IterateByValue`: fixes the item type and the concrete
// iterator type used when iterating a `BitFieldVec` by value.
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a> for BitFieldVec<W, B> {
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1624

1625
impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue for BitFieldVec<W, B> {
    /// Returns a by-value iterator over the vector, starting at index 0.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(0)
    }
}
1630

1631
// GAT carrier for `IterateByValueFrom`: fixes the item type and the concrete
// iterator type used when iterating a `BitFieldVec` from a given position.
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVec<W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1637

1638
impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom for BitFieldVec<W, B> {
    /// Returns a by-value iterator over the vector, starting at index `from`.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(from)
    }
}
1646

1647
// GAT carrier for `IterateByValue` on the (shared) subslice wrapper; the
// iterator type is the same one used for the full vector.
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1653

1654
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    /// Returns a by-value iterator, delegating to the underlying vector.
    ///
    /// NOTE(review): this starts from index 0 of `self.slice` and nothing
    /// visible here restricts iteration to the subslice's range — verify
    /// against the definition of `BitFieldVecSubsliceImpl` (not shown in this
    /// chunk) whether the range is applied elsewhere.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}
1661

1662
// GAT carrier for `IterateByValueFrom` on the (shared) subslice wrapper.
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1668

1669
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    /// Returns a by-value iterator starting at `from`, delegating to the
    /// underlying vector.
    ///
    /// NOTE(review): `from` is passed straight to the underlying vector, with
    /// no visible offset by the subslice's start or bound by its end — verify
    /// against the definition of `BitFieldVecSubsliceImpl` (not shown in this
    /// chunk) whether the range is applied elsewhere.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
1679

1680
// GAT carrier for `IterateByValue` on the mutable subslice wrapper; the
// iterator type is the same one used for the full vector.
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1686

1687
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    /// Returns a by-value iterator, delegating to the underlying vector.
    ///
    /// NOTE(review): this starts from index 0 of `self.slice` and nothing
    /// visible here restricts iteration to the subslice's range — verify
    /// against the definition of `BitFieldVecSubsliceImplMut` (not shown in
    /// this chunk) whether the range is applied elsewhere.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}
1694

1695
// GAT carrier for `IterateByValueFrom` on the mutable subslice wrapper.
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1701

1702
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    /// Returns a by-value iterator starting at `from`, delegating to the
    /// underlying vector.
    ///
    /// NOTE(review): `from` is passed straight to the underlying vector, with
    /// no visible offset by the subslice's start or bound by its end — verify
    /// against the definition of `BitFieldVecSubsliceImplMut` (not shown in
    /// this chunk) whether the range is applied elsewhere.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
1712

1713
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_with_capacity() {
        // Pushing no more elements than were preallocated must not trigger a
        // reallocation of the backing storage.
        let mut b = BitFieldVec::<usize, _>::with_capacity(10, 100);
        let capacity = b.bits.capacity();
        for _ in 0..100 {
            b.push(0);
        }
        assert_eq!(b.bits.capacity(), capacity);
    }

    /// Reference implementation of `BitFieldVec::copy`: element-by-element
    /// copy with the same saturating length semantics.
    fn copy<W: Word, B: AsRef<[W]>, C: AsRef<[W]> + AsMut<[W]>>(
        source: &BitFieldVec<W, B>,
        from: usize,
        dest: &mut BitFieldVec<W, C>,
        to: usize,
        len: usize,
    ) {
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
        for i in 0..len {
            dest.set(to + i, source.get(from + i));
        }
    }

    #[test]
    fn test_copy() {
        // Checks `BitFieldVec::copy` against the element-wise reference
        // implementation above for the given source offset, destination
        // offset, copy length, and vector size.
        fn check(
            src_pattern: usize,
            dst_pattern: usize,
            from: usize,
            to: usize,
            len: usize,
            size: usize,
        ) {
            let source = bit_field_vec![3 => src_pattern; size];
            let mut dest_actual = bit_field_vec![3 => dst_pattern; size];
            let mut dest_expected = dest_actual.clone();
            source.copy(from, &mut dest_actual, to, len);
            copy(&source, from, &mut dest_expected, to, len);
            assert_eq!(dest_actual, dest_expected);
        }

        for src_pattern in 0..8usize {
            for dst_pattern in 0..8usize {
                // if from_first_word == from_last_word && to_first_word == to_last_word
                check(src_pattern, dst_pattern, 1, 2, 10, 100);
                // else if from_first_word == from_last_word
                check(src_pattern, dst_pattern, 1, 20, 10, 100);
                // else if to_first_word == to_last_word
                check(src_pattern, dst_pattern, 20, 1, 10, 100);
                // else if src_bit == dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 3, 3 + 3 * 128, 40, 1000);
                // else if src_bit == dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 3, 3 + 3 * 128, 61, 1000);
                // else if src_bit < dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 3, 7 + 64 * 3, 40, 1000);
                // else if src_bit < dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 3, 7 + 64 * 3, 40 + 17, 1000);
                // else if src_bit > dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 7, 3 + 64 * 3, 40 + 64, 1000);
                // else if src_bit > dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 7, 3 + 64 * 3, 40 + 21 + 64, 1000);
            }
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc