/src/bits/bit_field_vec.rs
/*
 * SPDX-FileCopyrightText: 2023 Inria
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
 *
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
 */

//! Vectors of values of fixed bit width.
//!
//! Elements are stored contiguously, with no padding bits (in particular,
//! unless the bit width is a power of two some elements will be stored across
//! word boundaries).
//!
//! There are two flavors: [`BitFieldVec`], a mutable bit-field vector, and
//! [`AtomicBitFieldVec`], a mutable, thread-safe bit-field vector.
//!
//! These flavors depend on a backend, and presently we provide, given an
//! unsigned integer type `W` or an unsigned atomic integer type `A`:
//!
//! - `BitFieldVec<Vec<W>>`: a mutable, growable and resizable bit-field vector;
//! - `BitFieldVec<AsRef<[W]>>`: an immutable bit-field vector, useful for
//!   [ε-serde](epserde) support;
//! - `BitFieldVec<AsRef<[W]> + AsMut<[W]>>`: a mutable (but not resizable)
//!   bit-field vector;
//! - `AtomicBitFieldVec<AsRef<[A]>>`: a partially thread-safe, mutable (but not
//!   resizable) bit-field vector.
//!
//! More generally, the underlying type must satisfy the trait [`Word`] for
//! [`BitFieldVec`] and additionally [`IntoAtomic`] for [`AtomicBitFieldVec`].
//! A blanket implementation exposes slices of elements of type `W` as bit-field
//! vectors of width `W::BITS`, and analogously for atomic types `A`.
//!
//! Note that nothing is assumed about the content of the backend outside the
//! bits of the vector. Moreover, the content of the backend outside of the
//! vector is never modified by the methods of this structure.
//!
//! For high-speed unchecked scanning, we implement [`IntoUncheckedIterator`]
//! and [`IntoReverseUncheckedIterator`] on a reference to this type. They are
//! used, for example, to provide
//! [predecessor](crate::traits::indexed_dict::Pred) and
//! [successor](crate::traits::indexed_dict::Succ) primitives for
//! [`EliasFano`].
//!
//! # Low-level support
//!
//! The methods [`addr_of`](BitFieldVec::addr_of) and
//! [`get_unaligned`](BitFieldVec::get_unaligned) can be used to manually
//! prefetch parts of the data structure, or to read values using unaligned
//! reads, when the bit width makes it possible.
//!
//! # Examples
//! ```
//! # use sux::prelude::*;
//! # use bit_field_slice::*;
//! // Bit field vector of bit width 5 and length 10, all entries set to zero
//! let mut b = <BitFieldVec<usize>>::new(5, 10);
//! assert_eq!(b.len(), 10);
//! assert_eq!(b.bit_width(), 5);
//! b.set(0, 3);
//! assert_eq!(b.get(0), 3);
//!
//! // Empty bit field vector of bit width 20 with capacity 10
//! let mut b = <BitFieldVec<usize>>::with_capacity(20, 10);
//! assert_eq!(b.len(), 0);
//! assert_eq!(b.bit_width(), 20);
//! b.push(20);
//! assert_eq!(b.len(), 1);
//! assert_eq!(b.get(0), 20);
//! assert_eq!(b.pop(), Some(20));
//!
//! // Convenience macro
//! let b = bit_field_vec![10; 4, 500, 2, 0, 1];
//! assert_eq!(b.len(), 5);
//! assert_eq!(b.bit_width(), 10);
//! assert_eq!(b.get(0), 4);
//! assert_eq!(b.get(1), 500);
//! assert_eq!(b.get(2), 2);
//! assert_eq!(b.get(3), 0);
//! assert_eq!(b.get(4), 1);
//! ```

#[cfg(feature = "rayon")]
use crate::RAYON_MIN_LEN;
use crate::prelude::{bit_field_slice::*, *};
use crate::utils::{
    CannotCastToAtomicError, transmute_boxed_slice_from_atomic, transmute_boxed_slice_into_atomic,
    transmute_vec_from_atomic, transmute_vec_into_atomic,
};
use crate::{panic_if_out_of_bounds, panic_if_value};
use anyhow::{Result, bail};
use common_traits::{
    AsBytes, Atomic, AtomicInteger, AtomicUnsignedInt, CastableInto, IntoAtomic, invariant_eq,
};
use epserde::*;
use mem_dbg::*;
#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::sync::atomic::{Ordering, compiler_fence, fence};

/// Convenient, [`vec!`]-like macro to initialize `usize`-based bit-field
/// vectors.
///
/// Note that the syntax `bit_field_vec![width; length; value]` has been
/// deprecated in favor of `bit_field_vec![width => value; length]`, so that
/// value and length are in the same order as in [`vec!`].
///
/// # Examples
///
/// ```
/// # use sux::prelude::*;
/// # use bit_field_slice::*;
/// // Empty bit field vector of bit width 5
/// let b = bit_field_vec![5];
/// assert_eq!(b.len(), 0);
/// assert_eq!(b.bit_width(), 5);
///
/// // 10 values of bit width 6, all set to 3
/// let b = bit_field_vec![6 => 3; 10];
/// assert_eq!(b.len(), 10);
/// assert_eq!(b.bit_width(), 6);
/// assert_eq!(b.iter().all(|x| x == 3), true);
///
/// // List of values of bit width 10
/// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
/// assert_eq!(b.len(), 5);
/// assert_eq!(b.bit_width(), 10);
/// assert_eq!(b.get(0), 4);
/// assert_eq!(b.get(1), 500);
/// assert_eq!(b.get(2), 2);
/// assert_eq!(b.get(3), 0);
/// assert_eq!(b.get(4), 1);
/// ```
#[macro_export]
macro_rules! bit_field_vec {
    ($w:expr) => {
        $crate::bits::BitFieldVec::<usize, _>::new($w, 0)
    };
    ($w:expr; $n:expr; $v:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, [$($x),+].len());
            $(
                // Force type
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}

/// A vector of bit fields of fixed width.
#[derive(Epserde, Debug, Clone, Copy, Hash, MemDbg, MemSize, value_traits::Subslices)]
#[value_traits_subslices(bound = "B: AsRef<[W]>")]
#[derive(value_traits::SubslicesMut)]
#[value_traits_subslices_mut(bound = "B: AsRef<[W]> + AsMut<[W]>")]
pub struct BitFieldVec<W: Word = usize, B = Vec<W>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}

fn mask<W: Word>(bit_width: usize) -> W {
    if bit_width == 0 {
        W::ZERO
    } else {
        W::MAX >> (W::BITS - bit_width)
    }
}

impl<W: Word, B> BitFieldVec<W, B> {
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    #[inline(always)]
    /// Modifies the bit field in place.
    ///
    /// # Safety
    /// This is unsafe because it's the caller's responsibility to ensure
    /// that the length is compatible with the modified bits.
    pub unsafe fn map<W2: Word, B2>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<W2, B2> {
        BitFieldVec {
            bits: f(self.bits),
            bit_width: self.bit_width,
            mask: mask(self.bit_width),
            len: self.len,
        }
    }
}

impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Gets the address of the item storing (the first part of)
    /// the element of given index.
    ///
    /// This method is mainly useful for manually prefetching
    /// parts of the data structure.
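    ///
    /// # Examples
    ///
    /// A minimal sketch (the returned address is mainly useful as a prefetch
    /// hint):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
    /// // The first element starts at the beginning of the backend.
    /// assert_eq!(b.addr_of(0), b.as_slice().as_ptr());
    /// ```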
    pub fn addr_of(&self, index: usize) -> *const W {
        let start_bit = index * self.bit_width;
        let word_index = start_bit / W::BITS;
        (&self.bits.as_ref()[word_index]) as *const _
    }

    /// Like [`BitFieldSlice::get`], but using unaligned reads.
    ///
    /// This method can be used only for bit widths smaller than or equal to
    /// `W::BITS - 8 + 2`, or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
    /// an additional padding word must be present at the end of the vector.
    ///
    /// Note that to guarantee the absence of undefined behavior this method
    /// has to perform several tests. Consider using
    /// [`get_unaligned_unchecked`](Self::get_unaligned_unchecked) if you are
    /// sure that the constraints are respected.
    ///
    /// # Panics
    ///
    /// This method will panic if the constraints above are not respected.
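    ///
    /// # Examples
    ///
    /// A sketch using a padded vector (created with `new_unaligned`, which
    /// adds the padding word this method requires):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = <BitFieldVec<usize>>::new_unaligned(12, 4);
    /// b.set(2, 1000);
    /// assert_eq!(b.get_unaligned(2), 1000);
    /// ```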
    pub fn get_unaligned(&self, index: usize) -> W {
        assert!(
            self.bit_width <= W::BITS - 8 + 2
                || self.bit_width == W::BITS - 8 + 4
                || self.bit_width == W::BITS
        );
        panic_if_out_of_bounds!(index, self.len);
        assert!(
            (index * self.bit_width) / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES
        );
        unsafe { self.get_unaligned_unchecked(index) }
    }

    /// Like [`BitFieldSlice::get_unchecked`], but using unaligned reads.
    ///
    /// # Safety
    ///
    /// This method can be used only for bit widths smaller than or equal to
    /// `W::BITS - 8 + 2`, or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
    /// an additional padding word must be present at the end of the vector.
    ///
    /// # Panics
    ///
    /// This method will panic in debug mode if the safety constraints are not
    /// respected.
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> W {
        debug_assert!(
            self.bit_width <= W::BITS - 8 + 2
                || self.bit_width == W::BITS - 8 + 4
                || self.bit_width == W::BITS
        );
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
        let start_bit = index * self.bit_width;
        debug_assert!(start_bit / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES);
        let ptr = unsafe { base_ptr.add(start_bit / 8) } as *const W;
        let word = unsafe { core::ptr::read_unaligned(ptr) };
        (word >> (start_bit % 8)) & self.mask
    }

    /// Returns the backend of the vector as a slice of `W`.
    pub fn as_slice(&self) -> &[W] {
        self.bits.as_ref()
    }
}
/// An iterator over non-overlapping chunks of a bit-field vector, starting at
/// the beginning of the vector.
///
/// When the length of the vector is not evenly divided by the chunk size, the
/// last chunk of the iteration will be shorter.
///
/// This struct is created by the
/// [`try_chunks_mut`](crate::bits::bit_field_vec::BitFieldVec#impl-BitFieldSliceMut<W>-for-BitFieldVec<W,+B>)
/// method.
pub struct ChunksMut<'a, W: Word> {
    remaining: usize,
    bit_width: usize,
    chunk_size: usize,
    iter: std::slice::ChunksMut<'a, W>,
}

impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
    type Item = BitFieldVec<W, &'a mut [W]>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().map(|chunk| {
            let size = Ord::min(self.chunk_size, self.remaining);
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
            self.remaining -= size;
            next
        })
    }
}

impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {}

impl<W: Word> BitFieldVec<W, Vec<W>> {
    /// Creates a new zero-initialized vector of given bit width and length.
    pub fn new(bit_width: usize, len: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates a new zero-initialized vector of given bit width and length,
    /// adding padding bits to the end of the vector so that unaligned reads are
    /// possible.
    ///
    /// Note that this convenience method is a one-off: if the vector is resized
    /// or expanded, the padding will be lost.
    pub fn new_unaligned(bit_width: usize, len: usize) -> Self {
        let n_of_words = (len * bit_width).div_ceil(W::BITS);
        Self {
            // We add a word at the end
            bits: vec![W::ZERO; n_of_words + 1],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates an empty vector that doesn't need to reallocate for up to
    /// `capacity` elements.
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }

    /// Sets the length.
    ///
    /// # Safety
    ///
    /// `len * bit_width` must be at most `self.bits.len() * W::BITS`. Note that
    /// setting the length might result in reading uninitialized data.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS);
        self.len = len;
    }

    /// Sets the length to 0.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns the bit width of the values inside the vector.
    pub fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }

    /// Creates a new vector by copying a slice; the bit width will be the
    /// minimum width sufficient to hold all values in the slice.
    ///
    /// Returns an error if the bit width of the values in `slice` is larger
    /// than `W::BITS`.
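    ///
    /// # Examples
    ///
    /// A sketch converting between bit-field vectors (the widest value below,
    /// 500, fits in 9 bits, and thus in a `u16`):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let src = bit_field_vec![10; 4, 500, 2];
    /// let dst = BitFieldVec::<u16>::from_slice(&src).unwrap();
    /// assert_eq!(dst.get(1), 500);
    /// ```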
    pub fn from_slice<SW>(slice: &impl BitFieldSlice<SW>) -> Result<Self>
    where
        SW: Word + CastableInto<W>,
    {
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            max_len = Ord::max(max_len, unsafe { slice.get_unchecked(i).len() as usize });
        }

        if max_len > W::BITS {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            unsafe { result.set_unchecked(i, slice.get_unchecked(i).cast()) };
        }

        Ok(result)
    }

    /// Adds a value at the end of the vector.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS {
            self.bits.push(W::ZERO);
        }
        unsafe {
            self.set_unchecked(self.len, value);
        }
        self.len += 1;
    }

    /// Truncates the vector to `new_len`, or extends it with copies of `value`.
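    ///
    /// # Examples
    ///
    /// A small sketch:
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = bit_field_vec![8];
    /// b.resize(3, 5);
    /// assert_eq!(b.len(), 3);
    /// assert_eq!(b.get(2), 5);
    /// b.resize(1, 5);
    /// assert_eq!(b.len(), 1);
    /// ```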
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            if new_len * self.bit_width > self.bits.len() * W::BITS {
                self.bits
                    .resize((new_len * self.bit_width).div_ceil(W::BITS), W::ZERO);
            }
            for i in self.len..new_len {
                unsafe {
                    self.set_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }

    /// Removes and returns a value from the end of the vector.
    ///
    /// Returns `None` if the [`BitFieldVec`] is empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.get(self.len - 1);
        self.len -= 1;
        Some(value)
    }
}

impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldVec<W, B> {
    /// A version of [`BitFieldSliceMut::set`] that returns the previous value.
    ///
    /// # Panics
    /// - If `index` is out of bounds.
    /// - If `value` is not a valid value for the bit width of the vector.
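    ///
    /// # Examples
    ///
    /// A small sketch:
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = bit_field_vec![10; 4, 500, 2];
    /// assert_eq!(b.replace(1, 42).unwrap(), 500);
    /// assert_eq!(b.get(1), 42);
    /// ```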
    pub fn replace(&mut self, index: usize, value: W) -> Result<W> {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        Ok(unsafe { self.replace_unchecked(index, value) })
    }

    /// A version of [`BitFieldSliceMut::set_unchecked`] that returns the
    /// previous value and doesn't check for out-of-bounds access or value
    /// validity.
    ///
    /// # Safety
    /// This method is unsafe because it does not check that `index` is within
    /// bounds.
    pub unsafe fn replace_unchecked(&mut self, index: usize, value: W) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                let mut word = *bits.get_unchecked_mut(word_index);
                let old_value = (word >> bit_index) & self.mask;
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;
                old_value
            } else {
                let mut word = *bits.get_unchecked_mut(word_index);
                let mut old_value = word >> bit_index;
                word &= (W::ONE << bit_index) - W::ONE;
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;

                let mut word = *bits.get_unchecked_mut(word_index + 1);
                old_value |= word << (W::BITS - bit_index);
                word &= !(self.mask >> (W::BITS - bit_index));
                word |= value >> (W::BITS - bit_index);
                *bits.get_unchecked_mut(word_index + 1) = word;
                old_value & self.mask
            }
        }
    }
}

impl<W: Word, T> BitFieldSliceCore<W> for BitFieldVec<W, T> {
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}

impl<W: Word, B: AsRef<[W]>> BitFieldSlice<W> for BitFieldVec<W, B> {
    unsafe fn get_unchecked(&self, index: usize) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                (*bits.get_unchecked(word_index) >> bit_index) & self.mask
            } else {
                ((*bits.get_unchecked(word_index) >> bit_index)
                    | (*bits.get_unchecked(word_index + 1) << (W::BITS - bit_index)))
                    & self.mask
            }
        }
    }
}

impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldSliceMut<W> for BitFieldVec<W, B> {
    #[inline(always)]
    fn mask(&self) -> W {
        self.mask
    }

    #[inline(always)]
    fn set(&mut self, index: usize, value: W) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_unchecked(index, value);
        }
    }

    unsafe fn set_unchecked(&mut self, index: usize, value: W) {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        unsafe {
            if bit_index + self.bit_width <= W::BITS {
                let mut word = *bits.get_unchecked_mut(word_index);
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;
            } else {
                let mut word = *bits.get_unchecked_mut(word_index);
                word &= (W::ONE << bit_index) - W::ONE;
                word |= value << bit_index;
                *bits.get_unchecked_mut(word_index) = word;

                let mut word = *bits.get_unchecked_mut(word_index + 1);
                word &= !(self.mask >> (W::BITS - bit_index));
                word |= value >> (W::BITS - bit_index);
                *bits.get_unchecked_mut(word_index + 1) = word;
            }
        }
    }

    fn reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words].iter_mut().for_each(|x| *x = W::ZERO);
        if residual != 0 {
            bits[full_words] &= W::MAX << residual;
        }
    }

    #[cfg(feature = "rayon")]
    fn par_reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| *x = W::ZERO);
        if residual != 0 {
            bits[full_words] &= W::MAX << residual;
        }
    }

    /// This implementation performs the copy word by word, which is
    /// significantly faster than the default implementation.
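    ///
    /// # Examples
    ///
    /// A minimal sketch (both vectors must have the same bit width):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let src = bit_field_vec![10; 4, 500, 2, 0, 1];
    /// let mut dst = <BitFieldVec<usize>>::new(10, 5);
    /// src.copy(0, &mut dst, 0, 5);
    /// assert_eq!(dst.get(1), 500);
    /// ```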
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        // Reduce len to the elements available in both vectors
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let src_bit = src_pos % W::BITS;
        let dst_bit = dst_pos % W::BITS;
        let src_first_word = src_pos / W::BITS;
        let dst_first_word = dst_pos / W::BITS;
        let src_last_word = (src_pos + bit_len - 1) / W::BITS;
        let dst_last_word = (dst_pos + bit_len - 1) / W::BITS;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();

        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // dst_first_word != dst_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (W::BITS - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (W::BITS - dst_bit);
        } else if dst_first_word == dst_last_word {
            // src_first_word != src_last_word
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (W::BITS - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // src_first_word != src_last_word && dst_first_word != dst_last_word
            let mask = W::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;

            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);

            let residual =
                bit_len - (W::BITS - src_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;

            let mut word = source[src_first_word] >> (W::BITS - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (W::BITS - shift);
            }
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word && src_bit > dst_bit
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (W::BITS - shift);

            let mut word = source[src_first_word + 1] >> shift;

            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (W::BITS - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }

            word |= source[src_last_word] << (W::BITS - shift);

            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }

    /// This implementation keeps a buffer of `W::BITS` bits for reading and
    /// writing, obtaining a significant speedup with respect to the default
    /// implementation.
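    ///
    /// # Examples
    ///
    /// A sketch of intended use (the caller must guarantee that the values
    /// returned by `f` fit the bit width):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = bit_field_vec![4 => 1; 8];
    /// unsafe { b.apply_in_place_unchecked(|x| x + 1) };
    /// assert!(b.iter().all(|x| x == 2));
    /// ```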
    #[inline]
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
    where
        F: FnMut(W) -> W,
        Self: BitFieldSlice<W>,
    {
        if self.is_empty() {
            return;
        }
        let bit_width = self.bit_width();
        if bit_width == 0 {
            return;
        }
        let mask = self.mask();
        let number_of_words: usize = self.bits.as_ref().len();
        let last_word_idx = number_of_words.saturating_sub(1);

        let mut write_buffer: W = W::ZERO;
        let mut read_buffer: W = *unsafe { self.bits.as_ref().get_unchecked(0) };

        // specialized case because it's much faster
        if bit_width.is_power_of_two() {
            let mut bits_in_buffer = 0;

            // TODO!: figure out how to simplify
            let mut buffer_limit = (self.len() * bit_width) % W::BITS;
            if buffer_limit == 0 {
                buffer_limit = W::BITS;
            }

            for read_idx in 1..number_of_words {
                // pre-load the next word so it loads while we parse the buffer
                let next_word: W = *unsafe { self.bits.as_ref().get_unchecked(read_idx) };

                // parse as much as we can from the buffer
                loop {
                    let next_bits_in_buffer = bits_in_buffer + bit_width;

                    if next_bits_in_buffer > W::BITS {
                        break;
                    }

                    let value = read_buffer & mask;
                    // throw away the bits we just read
                    read_buffer >>= bit_width;
                    // apply user func
                    let new_value = f(value);
                    // put the new value in the write buffer
                    write_buffer |= new_value << bits_in_buffer;

                    bits_in_buffer = next_bits_in_buffer;
                }

                invariant_eq!(read_buffer, W::ZERO);
                *unsafe { self.bits.as_mut().get_unchecked_mut(read_idx - 1) } = write_buffer;
                read_buffer = next_word;
                write_buffer = W::ZERO;
                bits_in_buffer = 0;
            }

            // write the last word if we have some bits left
            while bits_in_buffer < buffer_limit {
                let value = read_buffer & mask;
                // throw away the bits we just read
                read_buffer >>= bit_width;
                // apply user func
                let new_value = f(value);
                // put the new value in the write buffer
                write_buffer |= new_value << bits_in_buffer;
                // advance the position in the buffer
                bits_in_buffer += bit_width;
            }

            *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
            return;
        }

        // The position inside the word. In most parametrizations of the
        // vector, since the bit width is not necessarily an integer
        // divisor of the word size, we need to keep track of the position
        // inside the word. As we scroll through the bits, due to the bits
        // remainder, we may need to operate on two words at the same time.
        let mut global_bit_index: usize = 0;

        // The bit boundaries of the word we are currently processing.
        let mut lower_word_limit = 0;
        let mut upper_word_limit = W::BITS;

        // We iterate across the words
        for word_number in 0..last_word_idx {
            // We iterate across the elements in the word.
            while global_bit_index + bit_width <= upper_word_limit {
                // We retrieve the value from the current word.
                let offset = global_bit_index - lower_word_limit;
                global_bit_index += bit_width;
                let element = self.mask() & (read_buffer >> offset);

                // We apply the function to the element.
                let new_element = f(element);

                // We set the element in the new word.
                write_buffer |= new_element << offset;
            }

            // We retrieve the next word from the bitvec.
            let next_word = *unsafe { self.bits.as_ref().get_unchecked(word_number + 1) };

            let mut new_write_buffer = W::ZERO;
            if upper_word_limit != global_bit_index {
                let remainder = upper_word_limit - global_bit_index;
                let offset = global_bit_index - lower_word_limit;
                // We compose the element from the remaining elements in the
                // current word and the elements in the next word.
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask();
                global_bit_index += bit_width;

                // We apply the function to the element.
                let new_element = f(element);

                write_buffer |= new_element << offset;

                new_write_buffer = new_element >> remainder;
            };

            read_buffer = next_word;

            *unsafe { self.bits.as_mut().get_unchecked_mut(word_number) } = write_buffer;

            write_buffer = new_write_buffer;
            lower_word_limit = upper_word_limit;
            upper_word_limit += W::BITS;
        }

        let mut offset = global_bit_index - lower_word_limit;

        // We iterate across the elements in the word.
        while offset < self.len() * bit_width - global_bit_index {
            // We retrieve the value from the current word.
            let element = self.mask() & (read_buffer >> offset);

            // We apply the function to the element.
            let new_element = f(element);

            // We set the element in the new word.
            write_buffer |= new_element << offset;
            offset += bit_width;
        }

        *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
    }

    type ChunksMut<'a>
        = ChunksMut<'a, W>
    where
        Self: 'a;

    /// # Errors
    ///
    /// This method will return an error if the chunk size multiplied by the
    /// [bit width](BitFieldSliceCore::bit_width) is not a multiple of
    /// `W::BITS` and more than one chunk must be returned.
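    ///
    /// # Examples
    ///
    /// A sketch with a word-aligned chunk size (16 values of width 4 fill
    /// 64 bits exactly):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = bit_field_vec![4 => 0; 32];
    /// for mut chunk in b.try_chunks_mut(16).unwrap() {
    ///     chunk.set(0, 7);
    /// }
    /// assert_eq!(b.get(0), 7);
    /// assert_eq!(b.get(16), 7);
    /// ```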
    fn try_chunks_mut(&mut self, chunk_size: usize) -> Result<Self::ChunksMut<'_>, ()> {
        let len = self.len();
        let bit_width = self.bit_width();
        if len <= chunk_size || (chunk_size * bit_width) % W::BITS == 0 {
            Ok(ChunksMut {
                remaining: len,
                bit_width: self.bit_width,
                chunk_size,
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(W::BITS)]
                    .chunks_mut((chunk_size * bit_width).div_ceil(W::BITS)),
            })
        } else {
            Err(())
        }
    }

    fn as_mut_slice(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
}

impl<W: Word> core::iter::Extend<W> for BitFieldVec<W, Vec<W>> {
    /// Adds values from an iterator, pushing them one at a time.
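    ///
    /// # Examples
    ///
    /// A small sketch:
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let mut b = bit_field_vec![8];
    /// b.extend([1_usize, 2, 3]);
    /// assert_eq!(b.len(), 3);
    /// assert_eq!(b.get(2), 3);
    /// ```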
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
        for value in iter {
            self.push(value);
        }
    }
}

/// Equality between bit-field vectors requires that the word is the same, the
/// bit width is the same, and the content is the same.
impl<W: Word, B: AsRef<[W]>, C: AsRef<[W]>> PartialEq<BitFieldVec<W, C>> for BitFieldVec<W, B> {
    fn eq(&self, other: &BitFieldVec<W, C>) -> bool {
        if self.bit_width() != other.bit_width() {
            return false;
        }
        if self.len() != other.len() {
            return false;
        }
        let bit_len = self.len() * self.bit_width();
        if self.bits.as_ref()[..bit_len / W::BITS] != other.bits.as_ref()[..bit_len / W::BITS] {
            return false;
        }

        let residual = bit_len % W::BITS;
        residual == 0
            || (self.bits.as_ref()[bit_len / W::BITS] ^ other.bits.as_ref()[bit_len / W::BITS])
                << (W::BITS - residual)
                == W::ZERO
    }
}

impl Eq for BitFieldVec {}

// Support for unchecked iterators

/// An [`UncheckedIterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorUncheckedIterator<'a, W, B>
where
    W: Word,
{
    vec: &'a BitFieldVec<W, B>,
    word_index: usize,
    window: W,
    fill: usize,
}

impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorUncheckedIterator<'a, W, B> {
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (fill, word_index);
        let window = if index == vec.len() {
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            let bit_offset = index * vec.bit_width;
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            fill = W::BITS - bit_index;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}

impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        if self.fill >= bit_width {
            self.fill -= bit_width;
            let res = self.window & self.vec.mask;
            self.window >>= bit_width;
            return res;
        }

        let res = self.window;
        self.word_index += 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        let res = (res | (self.window << self.fill)) & self.vec.mask;
        let used = bit_width - self.fill;
        self.window >>= used;
        self.fill = W::BITS - used;
        res
    }
}

impl<'a, W: Word, B: AsRef<[W]>> IntoUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoUncheckedIter = BitFieldVectorUncheckedIterator<'a, W, B>;
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVectorUncheckedIterator::new(self, from)
    }
}

/// An [`UncheckedIterator`] moving backwards over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorReverseUncheckedIterator<'a, W: Word, B> {
    vec: &'a BitFieldVec<W, B>,
    word_index: usize,
    window: W,
    fill: usize,
}

impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorReverseUncheckedIterator<'a, W, B> {
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (word_index, fill);

        let window = if index == 0 {
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            // We have to handle the case of zero bit width
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
            let bit_index = bit_offset % W::BITS;

            word_index = bit_offset / W::BITS;
            fill = bit_index + 1;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) << (W::BITS - fill)
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}

impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorReverseUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        if self.fill >= bit_width {
            self.fill -= bit_width;
            self.window = self.window.rotate_left(bit_width as u32);
            return self.window & self.vec.mask;
        }

        let mut res = self.window.rotate_left(self.fill as u32);
        self.word_index -= 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        let used = bit_width - self.fill;
        res = ((res << used) | (self.window >> (W::BITS - used))) & self.vec.mask;
        self.window <<= used;
        self.fill = W::BITS - used;
        res
    }
}

impl<'a, W: Word, B: AsRef<[W]>> IntoReverseUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoRevUncheckedIter = BitFieldVectorReverseUncheckedIterator<'a, W, B>;

    fn into_rev_unchecked_iter(self) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, self.len())
    }

    fn into_rev_unchecked_iter_from(self, from: usize) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, from)
    }
}

/// An [`Iterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVecIterator<'a, W, B>
where
    W: Word,
{
    unchecked: BitFieldVectorUncheckedIterator<'a, W, B>,
    range: core::ops::Range<usize>,
}

impl<'a, W: Word, B: AsRef<[W]>> BitFieldVecIterator<'a, W, B> {
    fn new(vec: &'a BitFieldVec<W, B>, from: usize) -> Self {
        let len = vec.len();
        if from > len {
            panic!("Start index out of bounds: {} > {}", from, len);
        }
        Self {
            unchecked: BitFieldVectorUncheckedIterator::new(vec, from),
            range: from..len,
        }
    }
}

impl<W: Word, B: AsRef<[W]>> Iterator for BitFieldVecIterator<'_, W, B> {
    type Item = W;
    fn next(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }
        // SAFETY: index has just been checked.
        let res = unsafe { self.unchecked.next_unchecked() };
        self.range.start += 1;
        Some(res)
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.range.len(), Some(self.range.len()))
    }
}

impl<W: Word, B: AsRef<[W]>> ExactSizeIterator for BitFieldVecIterator<'_, W, B> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.range.len()
    }
}

/// This implements iteration from the end, but it's slower than forward
/// iteration: here we perform a random access for each element, while the
/// forward iterator performs sequential accesses, keeping a buffer of
/// `W::BITS` bits to speed up the iteration.
///
/// If needed we could also keep a buffer from the end, but the logic would be
/// more complex and potentially slower.
impl<W: Word, B: AsRef<[W]>> DoubleEndedIterator for BitFieldVecIterator<'_, W, B> {
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }
        self.range.end -= 1;
        // SAFETY: the range is not empty, so the index is within bounds.
        Some(unsafe { self.unchecked.vec.get_unchecked(self.range.end) })
    }
}

impl<'a, W: Word, B: AsRef<[W]>> IntoIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoIter = BitFieldVecIterator<'a, W, B>;

    fn into_iter(self) -> Self::IntoIter {
        BitFieldVecIterator::new(self, 0)
    }
}

impl<'a, W: Word, B: AsRef<[W]>> IntoIteratorFrom for &'a BitFieldVec<W, B> {
    type IntoIterFrom = BitFieldVecIterator<'a, W, B>;

    fn into_iter_from(self, from: usize) -> Self::IntoIter {
        BitFieldVecIterator::new(self, from)
    }
}

impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
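    /// Returns an iterator over the values of the vector, starting at `from`.
    ///
    /// # Examples
    ///
    /// A small sketch:
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// let b = bit_field_vec![10; 4, 500, 2];
    /// assert_eq!(b.iter_from(1).collect::<Vec<_>>(), vec![500, 2]);
    /// ```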
    pub fn iter_from(&self, from: usize) -> BitFieldVecIterator<'_, W, B> {
        BitFieldVecIterator::new(self, from)
    }

    /// Returns an iterator over all the values of the vector.
    pub fn iter(&self) -> BitFieldVecIterator<'_, W, B> {
        self.iter_from(0)
    }
}

/// A tentatively thread-safe vector of bit fields of fixed width.
///
/// This implementation provides some concurrency guarantees, albeit not
/// full-fledged thread safety: more precisely, we can guarantee thread-safety
/// if the bit width is a power of two; otherwise, concurrent writes to values
/// that cross word boundaries might result in different threads succeeding in
/// writing only part of a value. If the user can guarantee that no two threads
/// ever write to the same boundary-crossing value, then no race condition can
/// happen.
///
/// Note that the trait [`AtomicHelper`] can be used to provide a more
/// convenient naming for some methods.
#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
pub struct AtomicBitFieldVec<W: Word + IntoAtomic = usize, B = Vec<<W as IntoAtomic>::AtomicType>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}

impl<W: Word + IntoAtomic, B> AtomicBitFieldVec<W, B> {
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }
}

impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B> {
    /// Returns the backend of the `AtomicBitFieldVec` as a slice of `A`, where
    /// `A` is the atomic variant of `W`.
    pub fn as_slice(&self) -> &[W::AtomicType] {
        self.bits.as_ref()
    }
}

impl<W: Word + IntoAtomic> AtomicBitFieldVec<W>
where
    W::AtomicType: AtomicUnsignedInt,
{
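    /// Creates a new zero-initialized vector of given bit width and length.
    ///
    /// # Examples
    ///
    /// A minimal sketch (single-threaded here for brevity, and using the
    /// unchecked getter defined below):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// # use std::sync::atomic::Ordering;
    /// let v = <AtomicBitFieldVec<usize>>::new(7, 10);
    /// v.set_atomic(3, 100, Ordering::Relaxed);
    /// assert_eq!(unsafe { v.get_atomic_unchecked(3, Ordering::Relaxed) }, 100);
    /// ```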
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<W> {
491✔
1261
        // we need at least two words to avoid branches in the gets
1262
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
1,964✔
1263
        AtomicBitFieldVec::<W> {
1264
            bits: (0..n_of_words)
491✔
1265
                .map(|_| W::AtomicType::new(W::ZERO))
1266
                .collect(),
1267
            bit_width,
1268
            mask: mask(bit_width),
982✔
1269
            len,
1270
        }
1271
    }
1272
}
1273

1274
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B>
1275
where
1276
    W::AtomicType: AtomicUnsignedInt + AsBytes,
1277
{
1278
    /// Writes zeros in all values.
1279
    #[deprecated(since = "0.4.4", note = "reset is deprecated in favor of reset_atomic")]
UNCOV
1280
    pub fn reset(&mut self, ordering: Ordering) {
×
UNCOV
1281
        self.reset_atomic(ordering)
×
1282
    }
1283
}
1284

1285
impl<W: Word + IntoAtomic, B> BitFieldSliceCore<W::AtomicType> for AtomicBitFieldVec<W, B> {
1286
    #[inline(always)]
1287
    fn bit_width(&self) -> usize {
184✔
1288
        debug_assert!(self.bit_width <= W::BITS);
368✔
1289
        self.bit_width
184✔
1290
    }
1291

1292
    #[inline(always)]
1293
    fn len(&self) -> usize {
404,824✔
1294
        self.len
404,824✔
1295
    }
1296
}
1297

1298
impl<W: Word + IntoAtomic, T: AsRef<[W::AtomicType]>> AtomicBitFieldSlice<W>
1299
    for AtomicBitFieldVec<W, T>
1300
where
1301
    W::AtomicType: AtomicUnsignedInt + AsBytes,
1302
{
1303
    #[inline]
1304
    unsafe fn get_atomic_unchecked(&self, index: usize, order: Ordering) -> W {
404,825✔
1305
        let pos = index * self.bit_width;
809,650✔
1306
        let word_index = pos / W::BITS;
809,650✔
1307
        let bit_index = pos % W::BITS;
809,650✔
1308
        let bits = self.bits.as_ref();
1,214,475✔
1309

1310
        unsafe {
1311
            if bit_index + self.bit_width <= W::BITS {
404,825✔
1312
                (bits.get_unchecked(word_index).load(order) >> bit_index) & self.mask
939,176✔
1313
            } else {
1314
                ((bits.get_unchecked(word_index).load(order) >> bit_index)
170,031✔
1315
                    | (bits.get_unchecked(word_index + 1).load(order) << (W::BITS - bit_index)))
170,031✔
1316
                    & self.mask
170,031✔
1317
            }
1318
        }
1319
    }
1320

1321
    // We reimplement set as we have the mask in the structure.
1322

1323
    /// Sets the element of the slice at the specified index.
1324
    ///
1325
    /// May panic if the index is not in in [0..[len](`BitFieldSliceCore::len`))
1326
    /// or the value does not fit in [`BitFieldSliceCore::bit_width`] bits.
1327
    #[inline(always)]
1328
    fn set_atomic(&self, index: usize, value: W, order: Ordering) {
184,005✔
1329
        panic_if_out_of_bounds!(index, self.len);
552,015✔
1330
        panic_if_value!(value, self.mask, self.bit_width);
552,012✔
1331
        unsafe {
1332
            self.set_atomic_unchecked(index, value, order);
184,003✔
1333
        }
1334
    }

    #[inline]
    unsafe fn set_atomic_unchecked(&self, index: usize, value: W, order: Ordering) {
        unsafe {
            debug_assert!(self.bit_width != W::BITS);
            let pos = index * self.bit_width;
            let word_index = pos / W::BITS;
            let bit_index = pos % W::BITS;
            let bits = self.bits.as_ref();

            if bit_index + self.bit_width <= W::BITS {
                // The value fits in a single word, so one compare-exchange
                // loop suffices and the write is consistent: readers never
                // observe a partially updated value.
                let mut current = bits.get_unchecked(word_index).load(order);
                loop {
                    let mut new = current;
                    new &= !(self.mask << bit_index);
                    new |= value << bit_index;

                    match bits
                        .get_unchecked(word_index)
                        .compare_exchange(current, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => current = e,
                    }
                }
            } else {
                // The value spans two words: the two compare-exchange loops
                // below are separate atomic operations, so a concurrent
                // reader may observe a partially updated value (whence the
                // partial thread-safety of this structure).
                let mut word = bits.get_unchecked(word_index).load(order);
                // Try to wait for other threads to finish their writes.
                fence(Ordering::Acquire);
                loop {
                    let mut new = word;
                    new &= (W::ONE << bit_index) - W::ONE;
                    new |= value << bit_index;

                    match bits
                        .get_unchecked(word_index)
                        .compare_exchange(word, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => word = e,
                    }
                }
                fence(Ordering::Release);

                // Ensure that the compiler does not reorder the two atomic
                // operations. This should increase the probability of
                // consistency between two concurrent writes, as both will
                // set the two words in the same order, and the
                // release/acquire fences should synchronize the threads as
                // much as possible.
                compiler_fence(Ordering::SeqCst);

                let mut word = bits.get_unchecked(word_index + 1).load(order);
                fence(Ordering::Acquire);
                loop {
                    let mut new = word;
                    new &= !(self.mask >> (W::BITS - bit_index));
                    new |= value >> (W::BITS - bit_index);

                    match bits
                        .get_unchecked(word_index + 1)
                        .compare_exchange(word, new, order, order)
                    {
                        Ok(_) => break,
                        Err(e) => word = e,
                    }
                }
                fence(Ordering::Release);
            }
        }
    }
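    /// Writes zeros in all values, leaving the content of the backend beyond
    /// the end of the vector untouched.
    ///
    /// # Examples
    ///
    /// A minimal sketch (same assumptions as the module-level examples):
    ///
    /// ```
    /// # use sux::prelude::*;
    /// # use bit_field_slice::*;
    /// use core::sync::atomic::Ordering;
    ///
    /// let mut v = <AtomicBitFieldVec<usize>>::new(5, 10);
    /// v.set_atomic(0, 3, Ordering::Relaxed);
    /// v.reset_atomic(Ordering::Relaxed);
    /// assert_eq!(v.get_atomic(0, Ordering::Relaxed), 0);
    /// ```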
    fn reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .iter()
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            // Zero the low `residual` bits of the last word, preserving the
            // bits of the backend beyond the end of the vector.
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }

    #[cfg(feature = "rayon")]
    fn par_reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }
}

// Conversions

impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Vec<W::AtomicType>>>
    for BitFieldVec<W, Vec<W>>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, Vec<W::AtomicType>>) -> Self {
        BitFieldVec {
            bits: transmute_vec_from_atomic(value.bits),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Box<[W::AtomicType]>>>
    for BitFieldVec<W, Box<[W]>>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, Box<[W::AtomicType]>>) -> Self {
        BitFieldVec {
            bits: transmute_boxed_slice_from_atomic(value.bits),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a [W::AtomicType]>>
    for BitFieldVec<W, &'a [W]>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, &'a [W::AtomicType]>) -> Self {
        BitFieldVec {
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a mut [W::AtomicType]>>
    for BitFieldVec<W, &'a mut [W]>
{
    #[inline]
    fn from(value: AtomicBitFieldVec<W, &'a mut [W::AtomicType]>) -> Self {
        BitFieldVec {
            bits: unsafe {
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
            },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<W: Word + IntoAtomic> From<BitFieldVec<W, Vec<W>>>
    for AtomicBitFieldVec<W, Vec<W::AtomicType>>
{
    #[inline]
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
        AtomicBitFieldVec {
            bits: transmute_vec_into_atomic(value.bits),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<W: Word + IntoAtomic> From<BitFieldVec<W, Box<[W]>>>
    for AtomicBitFieldVec<W, Box<[W::AtomicType]>>
{
    #[inline]
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
        AtomicBitFieldVec {
            bits: transmute_boxed_slice_into_atomic(value.bits),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<'a, W: Word + IntoAtomic> TryFrom<BitFieldVec<W, &'a [W]>>
    for AtomicBitFieldVec<W, &'a [W::AtomicType]>
{
    type Error = CannotCastToAtomicError<W>;

    #[inline]
    fn try_from(value: BitFieldVec<W, &'a [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W::AtomicType>() != core::mem::align_of::<W>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitFieldVec {
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        })
    }
}

impl<'a, W: Word + IntoAtomic> TryFrom<BitFieldVec<W, &'a mut [W]>>
    for AtomicBitFieldVec<W, &'a mut [W::AtomicType]>
{
    type Error = CannotCastToAtomicError<W>;

    #[inline]
    fn try_from(value: BitFieldVec<W, &'a mut [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W::AtomicType>() != core::mem::align_of::<W>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitFieldVec {
            bits: unsafe {
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
            },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        })
    }
}

impl<W: Word> From<BitFieldVec<W, Vec<W>>> for BitFieldVec<W, Box<[W]>> {
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
        BitFieldVec {
            bits: value.bits.into_boxed_slice(),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<W: Word> From<BitFieldVec<W, Box<[W]>>> for BitFieldVec<W, Vec<W>> {
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
        BitFieldVec {
            bits: value.bits.into_vec(),
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}

impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValue for BitFieldVec<W, B> {
    type Value = W;

    fn len(&self) -> usize {
        <Self as BitFieldSliceCore<W>>::len(self)
    }
}

impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValueGet for BitFieldVec<W, B> {
    unsafe fn get_value_unchecked(&self, index: usize) -> Self::Value {
        unsafe { <Self as BitFieldSlice<W>>::get_unchecked(self, index) }
    }
}

impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueSet
    for BitFieldVec<W, B>
{
    unsafe fn set_value_unchecked(&mut self, index: usize, value: Self::Value) {
        unsafe { <Self as BitFieldSliceMut<W>>::set_unchecked(self, index, value) };
    }
}

impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueRepl
    for BitFieldVec<W, B>
{
    unsafe fn replace_value_unchecked(&mut self, index: usize, value: Self::Value) -> Self::Value {
        unsafe { self.replace_unchecked(index, value) }
    }
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a> for BitFieldVec<W, B> {
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}

impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue for BitFieldVec<W, B> {
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(0)
    }
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVec<W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}

impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom for BitFieldVec<W, B> {
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(from)
    }
}

impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}

impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}

impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}

impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}

impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_with_capacity() {
        let mut b = BitFieldVec::<usize, _>::with_capacity(10, 100);
        let capacity = b.bits.capacity();
        for _ in 0..100 {
            b.push(0);
        }
        assert_eq!(b.bits.capacity(), capacity);
    }

    // Naive element-by-element reference implementation used to check the
    // word-level `BitFieldVec::copy`.
    fn copy<W: Word, B: AsRef<[W]>, C: AsRef<[W]> + AsMut<[W]>>(
        source: &BitFieldVec<W, B>,
        from: usize,
        dest: &mut BitFieldVec<W, C>,
        to: usize,
        len: usize,
    ) {
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
        for i in 0..len {
            dest.set(to + i, source.get(from + i));
        }
    }

    #[test]
    fn test_copy() {
        for src_pattern in 0..8 {
            for dst_pattern in 0..8 {
                // if from_first_word == from_last_word && to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 2, 10);
                copy(&source, 1, &mut dest_expected, 2, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if from_first_word == from_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(1, &mut dest_actual, 20, 10);
                copy(&source, 1, &mut dest_expected, 20, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if to_first_word == to_last_word
                let source = bit_field_vec![3 => src_pattern; 100];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 100];
                let mut dest_expected = dest_actual.clone();
                source.copy(20, &mut dest_actual, 1, 10);
                copy(&source, 20, &mut dest_expected, 1, 10);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit == dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 40);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 40);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit == dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 3 + 3 * 128, 61);
                copy(&source, 3, &mut dest_expected, 3 + 3 * 128, 61);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit < dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit < dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(3, &mut dest_actual, 7 + 64 * 3, 40 + 17);
                copy(&source, 3, &mut dest_expected, 7 + 64 * 3, 40 + 17);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit > dest_bit (residual = 1)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 64);
                assert_eq!(dest_actual, dest_expected);
                // else if src_bit > dest_bit (residual = 64)
                let source = bit_field_vec![3 => src_pattern; 1000];
                let mut dest_actual = bit_field_vec![3 => dst_pattern; 1000];
                let mut dest_expected = dest_actual.clone();
                source.copy(7, &mut dest_actual, 3 + 64 * 3, 40 + 21 + 64);
                copy(&source, 7, &mut dest_expected, 3 + 64 * 3, 40 + 21 + 64);
                assert_eq!(dest_actual, dest_expected);
            }
        }
    }
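
    // A minimal sketch checking the conversions above: fill an atomic vector,
    // convert it into a regular `BitFieldVec` (a transmute of the backend),
    // and convert it back. We assume, as elsewhere in this module, that the
    // bit-field slice traits are in scope via `use super::*`.
    #[test]
    fn test_atomic_conversion_round_trip() {
        use core::sync::atomic::Ordering;

        let a = <AtomicBitFieldVec<usize>>::new(5, 10);
        for i in 0..10 {
            a.set_atomic(i, i, Ordering::Relaxed);
        }
        let b: BitFieldVec<usize> = a.into();
        for i in 0..10 {
            assert_eq!(b.get(i), i);
        }
        let a: AtomicBitFieldVec<usize> = b.into();
        for i in 0..10 {
            assert_eq!(a.get_atomic(i, Ordering::Relaxed), i);
        }
    }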
}