• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vigna / sux-rs / 17511253202

06 Sep 2025 06:40AM UTC coverage: 66.333% (-0.06%) from 66.388%
17511253202

push

github

vigna
Updated crates

3576 of 5391 relevant lines covered (66.33%)

122200768.21 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

68.05
/src/bits/bit_field_vec.rs
1
/*
2
 * SPDX-FileCopyrightText: 2023 Inria
3
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
4
 *
5
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
6
 */
7

8
//! Vectors of values of fixed bit width.
9
//!
10
//! Elements are stored contiguously, with no padding bits (in particular,
11
//! unless the bit width is a power of two some elements will be stored across
12
//! word boundaries).
13
//!
14
//! There are two flavors: [`BitFieldVec`], a mutable bit-field vector, and
15
//! [`AtomicBitFieldVec`], a mutable, thread-safe bit-field vector.
16
//!
17
//! These flavors depend on a backend, and presently we provide, given an
18
//! unsigned integer type `W` or an unsigned atomic integer type `A`:
19
//!
20
//! - `BitFieldVec<Vec<W>>`: a mutable, growable and resizable bit-field vector;
21
//! - `BitFieldVec<AsRef<[W]>>`: an immutable bit-field vector, useful for
22
//!   [ε-serde](epserde) support;
23
//! - `BitFieldVec<AsRef<[W]> + AsMut<[W]>>`: a mutable (but not resizable) bit
24
//!   vector;
25
//! - `AtomicBitFieldVec<AsRef<[A]>>`: a partially thread-safe, mutable (but not
26
//!   resizable) bit-field vector.
27
//!
28
//! More generally, the underlying type must satisfy the trait [`Word`] for
29
//! [`BitFieldVec`] and additionally [`IntoAtomic`] for [`AtomicBitFieldVec`].
30
//! A blanket implementation exposes slices of elements of type `W` as bit-field
31
//! vectors of width `W::BITS`, analogously for atomic types `A`.
32
//!
33
//! Note that nothing is assumed about the content of the backend outside the
34
//! bits of the vector. Moreover, the content of the backend outside of the
35
//! vector is never modified by the methods of this structure.
36
//!
37
//! For high-speed unchecked scanning, we implement [`IntoUncheckedIterator`]
38
//! and [`IntoReverseUncheckedIterator`] on a reference to this type. The are
39
//! used, for example, to provide
40
//! [predecessor](crate::traits::indexed_dict::Pred) and
41
//! [successor](crate::traits::indexed_dict::Succ) primitives for
42
//! [`EliasFano`].
43
//!
44
//! # Low-level support
45
//!
46
//! The methods [`addr_of`](BitFieldVec::addr_of) and
47
//! [`get_unaligned`](BitFieldVec::get_unaligned) can be used to manually
48
//! prefetch parts of the data structure, or read values using unaligned read,
49
//! when the bit width makes it possible.
50
//!
51
//! # Examples
52
//! ```rust
53
//! use sux::prelude::*;
54
//!
55
//! // Bit field vector of bit width 5 and length 10, all entries set to zero
56
//! let mut b = <BitFieldVec<usize>>::new(5, 10);
57
//! assert_eq!(b.len(), 10);
58
//! assert_eq!(b.bit_width(), 5);
59
//! b.set(0, 3);
60
//! assert_eq!(b.get(0), 3);
61
//!
62
//! // Empty bit field vector of bit width 20 with capacity 10
63
//! let mut b = <BitFieldVec<usize>>::with_capacity(20, 10);
64
//! assert_eq!(b.len(), 0);
65
//! assert_eq!(b.bit_width(), 20);
66
//! b.push(20);
67
//! assert_eq!(b.len(), 1);
68
//! assert_eq!(b.get(0), 20);
69
//! assert_eq!(b.pop(), Some(20));
70
//!
71
//! // Convenience macro
72
//! let b = bit_field_vec![10; 4, 500, 2, 0, 1];
73
//! assert_eq!(b.len(), 5);
74
//! assert_eq!(b.bit_width(), 10);
75
//! assert_eq!(b.get(0), 4);
76
//! assert_eq!(b.get(1), 500);
77
//! assert_eq!(b.get(2), 2);
78
//! assert_eq!(b.get(3), 0);
79
//! assert_eq!(b.get(4), 1);
80
//! ```
81

82
use crate::prelude::*;
83
use crate::traits::bit_field_slice::{panic_if_out_of_bounds, panic_if_value};
84
use crate::utils::{transmute_boxed_slice, transmute_vec};
85
#[cfg(feature = "rayon")]
86
use crate::RAYON_MIN_LEN;
87
use anyhow::{bail, Result};
88
use common_traits::{
89
    invariant_eq, AsBytes, Atomic, AtomicInteger, AtomicUnsignedInt, CastableInto, IntoAtomic,
90
};
91
use epserde::*;
92
use mem_dbg::*;
93
#[cfg(feature = "rayon")]
94
use rayon::prelude::*;
95
use std::sync::atomic::{compiler_fence, fence, Ordering};
96

97
/// Convenient, [`vec!`]-like macro to initialize `usize`-based bit-field
98
/// vectors.
99
///
100
/// Note that the syntax `bit_field_vec![width; length; value]` has been
/// deprecated in favor of `bit_field_vec![width => value; length]`, so that
/// value and length are in the same order as in [`vec!`].
103
///
104
/// # Examples
105
///
106
/// ```rust
107
/// use sux::prelude::*;
108
///
109
/// // Empty bit field vector of bit width 5
110
/// let b = bit_field_vec![5];
111
/// assert_eq!(b.len(), 0);
112
/// assert_eq!(b.bit_width(), 5);
113
///
114
/// // 10 values of bit width 6, all set to 3
115
/// let b = bit_field_vec![6 => 3; 10];
116
/// assert_eq!(b.len(), 10);
117
/// assert_eq!(b.bit_width(), 6);
118
/// assert_eq!(b.iter().all(|x| x == 3), true);
119
///
120
/// // List of values of bit width 10
121
/// let b = bit_field_vec![10; 4, 500, 2, 0, 1];
122
/// assert_eq!(b.len(), 5);
123
/// assert_eq!(b.bit_width(), 10);
124
/// assert_eq!(b.get(0), 4);
125
/// assert_eq!(b.get(1), 500);
126
/// assert_eq!(b.get(2), 2);
127
/// assert_eq!(b.get(3), 0);
128
/// assert_eq!(b.get(4), 1);
129
/// ```
130
#[macro_export]
macro_rules! bit_field_vec {
    // `bit_field_vec![width]`: empty vector of the given bit width.
    ($w:expr) => {
        $crate::bits::BitFieldVec::<usize, _>::new($w, 0)
    };
    // Deprecated form `bit_field_vec![width; length; value]`: `length`
    // copies of `value` (kept for backward compatibility).
    ($w:expr; $n:expr; $v:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![width => value; length]`: `length` copies of `value`,
    // with value and length in the same order as in `vec!`.
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, $n);
            // Force type
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // `bit_field_vec![width; v0, v1, ...]`: explicit list of values.
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitFieldVec::<usize, _>::with_capacity($w, [$($x),+].len());
            $(
                // Force type
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}
165

166
/// A vector of bit fields of fixed width.
///
/// Values are stored contiguously in the backend `B`, `bit_width` bits each;
/// elements may straddle a word boundary.
#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize, value_traits::Subslices)]
#[value_traits_subslices(bound = "B: AsRef<[W]>")]
#[derive(value_traits::SubslicesMut)]
#[value_traits_subslices_mut(bound = "B: AsRef<[W]> + AsMut<[W]>")]
pub struct BitFieldVec<W: Word = usize, B = Vec<W>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    /// Invariant: `bit_width <= W::BITS` (checked by `debug_assert`s).
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    /// Kept in sync with `bit_width` via the module-level `mask` function.
    mask: W,
    /// The length of the vector, in elements (not words).
    len: usize,
}
181

182
fn mask<W: Word>(bit_width: usize) -> W {
9,661✔
183
    if bit_width == 0 {
9,661✔
184
        W::ZERO
192✔
185
    } else {
186
        W::MAX >> (W::BITS - bit_width)
9,469✔
187
    }
188
}
189

190
impl<W: Word, B> BitFieldVec<W, B> {
191
    /// # Safety
192
    /// `len` * `bit_width` must be between 0 (included) the number of
193
    /// bits in `bits` (included).
194
    #[inline(always)]
195
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
3,209✔
196
        Self {
197
            bits,
198
            bit_width,
199
            mask: mask(bit_width),
6,418✔
200
            len,
201
        }
202
    }
203

204
    #[inline(always)]
205
    pub fn into_raw_parts(self) -> (B, usize, usize) {
3,122✔
206
        (self.bits, self.bit_width, self.len)
6,244✔
207
    }
208

209
    #[inline(always)]
210
    /// Modifies the bit field in place.
211
    ///
212
    /// # Safety
213
    /// This is unsafe because it's the caller's responsibility to ensure that
214
    /// that the length is compatible with the modified bits.
215
    pub unsafe fn map<W2: Word, B2>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<W2, B2> {
×
216
        BitFieldVec {
217
            bits: f(self.bits),
×
218
            bit_width: self.bit_width,
×
219
            mask: mask(self.bit_width),
×
220
            len: self.len,
×
221
        }
222
    }
223
}
224

225
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
226
    /// Gets the address of the item storing (the first part of)
227
    /// the element of given index.
228
    ///
229
    /// This method is mainly useful for manually prefetching
230
    /// parts of the data structure.
231
    pub fn addr_of(&self, index: usize) -> *const W {
4✔
232
        let start_bit = index * self.bit_width;
8✔
233
        let word_index = start_bit / W::BITS;
8✔
234
        (&self.bits.as_ref()[word_index]) as *const _
8✔
235
    }
236

237
    /// Like [`BitFieldSlice::get`], but using unaligned reads.
238
    ///
239
    /// This method can be used only for bit width smaller than or equal to
240
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
241
    /// an additional padding word must be present at the end of the vector.
242
    ///
243
    /// Note that to guarantee the absence of undefined behavior this method
244
    /// has to perform several tests. Consider using
245
    /// [`get_unaligned_unchecked`](Self::get_unaligned_unchecked) if you are
246
    /// sure that the constraints are respected.
247
    ///
248
    /// # Panics
249
    ///
250
    /// This method will panic if the constraints above are not respected.
251
    pub fn get_unaligned(&self, index: usize) -> W {
65✔
252
        assert!(
61✔
253
            self.bit_width <= W::BITS - 8 + 2
65✔
254
                || self.bit_width == W::BITS - 8 + 4
24✔
255
                || self.bit_width == W::BITS
14✔
256
        );
257
        panic_if_out_of_bounds!(index, self.len);
183✔
258
        assert!(
60✔
259
            (index * self.bit_width) / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES
122✔
260
        );
261
        unsafe { self.get_unaligned_unchecked(index) }
180✔
262
    }
263

264
    /// Like [`BitFieldSlice::get_unchecked`], but using unaligned reads.
265
    ///
266
    /// # Safety
267
    ///
268
    /// This method can be used only for bit width smaller than or equal to
269
    /// `W::BITS - 8 + 2` or equal to `W::BITS - 8 + 4` or `W::BITS`. Moreover,
270
    /// an additional padding word must be present at the end of the vector.
271
    ///
272
    /// # Panics
273
    ///
274
    /// This method will panic in debug mode if the safety constraints are not
275
    /// respected.
276
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> W {
65✔
277
        debug_assert!(
65✔
278
            self.bit_width <= W::BITS - 8 + 2
65✔
279
                || self.bit_width == W::BITS - 8 + 4
24✔
280
                || self.bit_width == W::BITS
14✔
281
        );
282
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
122✔
283
        let start_bit = index * self.bit_width;
122✔
284
        debug_assert!(start_bit / W::BYTES + W::BYTES <= self.bits.as_ref().len() * W::BYTES);
183✔
285
        let ptr = base_ptr.add(start_bit / 8) as *const W;
180✔
286
        let word = core::ptr::read_unaligned(ptr);
180✔
287
        (word >> (start_bit % 8)) & self.mask
60✔
288
    }
289

290
    /// Returns the backend of the vector as a slice of `W`.
291
    pub fn as_slice(&self) -> &[W] {
3✔
292
        self.bits.as_ref()
3✔
293
    }
294
}
295
/// An iterator over non-overlapping chunks of a bit-field vector, starting at
/// the beginning of the vector.
///
/// When the vector len is not evenly divided by the chunk size, the last chunk
/// of the iteration will be shorter.
///
/// This struct is created by the
/// [`try_chunks_mut`](crate::bits::bit_field_vec::BitFieldVec#impl-BitFieldSliceMut<W>-for-BitFieldVec<W,+B>)
/// method.
pub struct ChunksMut<'a, W: Word> {
    /// Number of elements not yet yielded.
    remaining: usize,
    /// Bit width of the elements of the underlying vector.
    bit_width: usize,
    /// Maximum number of elements in each yielded chunk.
    chunk_size: usize,
    /// Iterator over the word-level chunks of the backend.
    iter: std::slice::ChunksMut<'a, W>,
}
310

311
impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
312
    type Item = BitFieldVec<W, &'a mut [W]>;
313

314
    #[inline]
315
    fn next(&mut self) -> Option<Self::Item> {
90✔
316
        self.iter.next().map(|chunk| {
359✔
317
            let size = Ord::min(self.chunk_size, self.remaining);
356✔
318
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
445✔
319
            self.remaining -= size;
89✔
320
            next
89✔
321
        })
322
    }
323
}
324

325
// NOTE(review): empty inherent impl block — appears to be a leftover;
// consider removing it.
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {}
326

327
impl<W: Word> BitFieldVec<W, Vec<W>> {
    /// Creates a new zero-initialized vector of given bit width and length.
    pub fn new(bit_width: usize, len: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates a new zero-initialized vector of given bit width and length,
    /// adding padding bits to the end of the vector so that unaligned reads are
    /// possible.
    ///
    /// Note that this convenience method is a one-off: if the vector is resized
    /// or expanded, the padding will be lost.
    pub fn new_unaligned(bit_width: usize, len: usize) -> Self {
        let n_of_words = (len * bit_width).div_ceil(W::BITS);
        Self {
            // We add a word at the end
            bits: vec![W::ZERO; n_of_words + 1],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }

    /// Creates an empty vector that doesn't need to reallocate for up to
    /// `capacity` elements.
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        // We need at least one word to handle the case of bit width zero.
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }

    /// Sets the length.
    ///
    /// # Safety
    ///
    /// `len * bit_width` must be at most `self.bits.len() * W::BITS`. Note that
    /// setting the length might result in reading uninitialized data.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS);
        self.len = len;
    }

    /// Sets len to 0. The backend and its capacity are left untouched.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Returns the bit width of the values inside the vector.
    pub fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }

    /// Creates a new vector by copying a slice; the bit width will be the minimum
    /// width sufficient to hold all values in the slice.
    ///
    /// Returns an error if the bit width of the values in `slice` is larger than
    /// `W::BITS`.
    pub fn from_slice<SW>(slice: &impl BitFieldSlice<SW>) -> Result<Self>
    where
        SW: Word + CastableInto<W>,
    {
        // Maximum bit length over all elements of the slice.
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            // SAFETY: `i` is within bounds by the loop range.
            max_len = Ord::max(max_len, unsafe { slice.get_unchecked(i).len() as usize });
        }

        if max_len > W::BITS {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            // SAFETY: `i` is within bounds, and every value fits in
            // `max_len` bits by construction.
            unsafe { result.set_unchecked(i, slice.get_unchecked(i).cast()) };
        }

        Ok(result)
    }

    /// Adds a value at the end of the vector.
    ///
    /// # Panics
    /// If `value` does not fit the bit width of the vector.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        // Grow the backend by one word if the new element does not fit.
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS {
            self.bits.push(W::ZERO);
        }
        unsafe {
            self.set_unchecked(self.len, value);
        }
        self.len += 1;
    }

    /// Truncates or extend with `value` the vector.
    ///
    /// # Panics
    /// If `value` does not fit the bit width of the vector.
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            // Grow the backend so all new elements fit.
            if new_len * self.bit_width > self.bits.len() * W::BITS {
                self.bits
                    .resize((new_len * self.bit_width).div_ceil(W::BITS), W::ZERO);
            }
            for i in self.len..new_len {
                unsafe {
                    self.set_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }

    /// Removes and returns a value from the end of the vector.
    ///
    /// Returns None if the [`BitFieldVec`] is empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.get(self.len - 1);
        self.len -= 1;
        Some(value)
    }
}
468

469
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldVec<W, B> {
    /// A version of [`BitFieldSliceMut::set`] that returns the previous value.
    ///
    /// # Panics
    /// - If `index` is out of bounds.
    /// - If `value` is not a valid value for the bit width of the vector.
    pub fn replace(&mut self, index: usize, value: W) -> Result<W> {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        Ok(unsafe { self.replace_unchecked(index, value) })
    }

    /// A version of [`BitFieldSliceMut::set_unchecked`] that returns the previous value,
    /// that doesn't check for out-of-bounds access or value validity.
    ///
    /// # Safety
    /// This method is unsafe because it does not check that `index` is within bounds
    pub unsafe fn replace_unchecked(&mut self, index: usize, value: W) -> W {
        // Bit position of the element, decomposed into word and offset.
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        if bit_index + self.bit_width <= W::BITS {
            // Single-word case: extract the old value, clear the slot, and
            // OR in the new value.
            let mut word = *bits.get_unchecked_mut(word_index);
            let old_value = (word >> bit_index) & self.mask;
            word &= !(self.mask << bit_index);
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;
            old_value
        } else {
            // Cross-word case: the element straddles `word_index` and the
            // next word. Read/write the low part first...
            let mut word = *bits.get_unchecked_mut(word_index);
            let mut old_value = word >> bit_index;
            word &= (W::ONE << bit_index) - W::ONE;
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;

            // ...then the high part, completing the old value on the way.
            let mut word = *bits.get_unchecked_mut(word_index + 1);
            old_value |= word << (W::BITS - bit_index);
            word &= !(self.mask >> (W::BITS - bit_index));
            word |= value >> (W::BITS - bit_index);
            *bits.get_unchecked_mut(word_index + 1) = word;
            old_value & self.mask
        }
    }
}
515

516
impl<W: Word, T> BitFieldSliceCore<W> for BitFieldVec<W, T> {
    /// Returns the bit width of the values stored in the vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        let width = self.bit_width;
        // The width can never exceed the number of bits of the backend word.
        debug_assert!(width <= W::BITS);
        width
    }

    /// Returns the number of elements in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
527

528
impl<W: Word, B: AsRef<[W]>> BitFieldSlice<W> for BitFieldVec<W, B> {
    /// Returns the element of given index, without bounds checking.
    unsafe fn get_unchecked(&self, index: usize) -> W {
        // Bit position of the element, decomposed into word and offset.
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // Single-word case: shift down and mask.
            (*bits.get_unchecked(word_index) >> bit_index) & self.mask
        } else {
            // Cross-word case: combine the high bits of the first word with
            // the low bits of the second one.
            ((*bits.get_unchecked(word_index) >> bit_index)
                | (*bits.get_unchecked(word_index + 1) << (W::BITS - bit_index)))
                & self.mask
        }
    }
}
544

545
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> BitFieldSliceMut<W> for BitFieldVec<W, B> {
546
    /// Returns the mask used to extract values (lowest `bit_width` bits set).
    #[inline(always)]
    fn mask(&self) -> W {
        self.mask
    }
550

551
    /// Sets the element of given index to `value`.
    ///
    /// Panics if `index` is out of bounds or `value` does not fit the bit
    /// width; otherwise delegates to the unchecked version.
    #[inline(always)]
    fn set(&mut self, index: usize, value: W) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_unchecked(index, value);
        }
    }
559

560
    /// Sets the element of given index to `value`, without bounds or value
    /// checking.
    unsafe fn set_unchecked(&mut self, index: usize, value: W) {
        // Bit position of the element, decomposed into word and offset.
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_mut();

        if bit_index + self.bit_width <= W::BITS {
            // Single-word case: clear the slot and OR in the new value.
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= !(self.mask << bit_index);
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;
        } else {
            // Cross-word case: write the low part of the value into the
            // high bits of the first word...
            let mut word = *bits.get_unchecked_mut(word_index);
            word &= (W::ONE << bit_index) - W::ONE;
            word |= value << bit_index;
            *bits.get_unchecked_mut(word_index) = word;

            // ...and the high part into the low bits of the second word.
            let mut word = *bits.get_unchecked_mut(word_index + 1);
            word &= !(self.mask >> (W::BITS - bit_index));
            word |= value >> (W::BITS - bit_index);
            *bits.get_unchecked_mut(word_index + 1) = word;
        }
    }
583

584
    /// Sets all elements to zero, leaving any backend bits outside the
    /// vector untouched.
    fn reset(&mut self) {
        // Words fully covered by the vector, plus the number of bits used
        // in the (possibly) partially covered last word.
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words].iter_mut().for_each(|x| *x = W::ZERO);
        if residual != 0 {
            // Zero only the low `residual` bits of the last word.
            bits[full_words] &= W::MAX << residual;
        }
    }
594

595
    /// Parallel version of `reset`, zeroing the full words with rayon;
    /// backend bits outside the vector are left untouched.
    #[cfg(feature = "rayon")]
    fn par_reset(&mut self) {
        // Words fully covered by the vector, plus the number of bits used
        // in the (possibly) partially covered last word.
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| *x = W::ZERO);
        if residual != 0 {
            // Zero only the low `residual` bits of the last word.
            bits[full_words] &= W::MAX << residual;
        }
    }
609

610
    /// This implementation performs the copy word by word, which is
    /// significantly faster than the default implementation.
    ///
    /// Copies `len` elements from `self` starting at `from` into `dst`
    /// starting at `to`; `len` is clamped to what is available in both
    /// vectors. Panics if the two bit widths differ.
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        // Reduce len to the elements available in both vectors
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        // Bit positions and word/offset decompositions of the two ranges.
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let src_bit = src_pos % W::BITS;
        let dst_bit = dst_pos % W::BITS;
        let src_first_word = src_pos / W::BITS;
        let dst_first_word = dst_pos / W::BITS;
        let src_last_word = (src_pos + bit_len - 1) / W::BITS;
        let dst_last_word = (dst_pos + bit_len - 1) / W::BITS;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();

        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            // Both ranges fit in a single word: one masked read/write.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // dst_first_word != dst_last_word
            // Single source word, destination straddles two words.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (W::BITS - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (W::BITS - dst_bit);
        } else if dst_first_word == dst_last_word {
            // src_first_word != src_last_word
            // Source straddles two words, single destination word.
            let mask = W::MAX >> (W::BITS - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (W::BITS - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // src_first_word != src_last_word && dst_first_word != dst_last_word
            // Same in-word offset: the middle words can be copied verbatim.
            let mask = W::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;

            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);

            // Bits remaining in the last (partial) word.
            let residual =
                bit_len - (W::BITS - src_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word
            // Destination offset is larger: shift each source word left,
            // carrying the spilled high bits into the next destination word.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;

            let mut word = source[src_first_word] >> (W::BITS - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (W::BITS - shift);
            }
            // Bits remaining in the last (partial) word.
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else {
            // src_first_word != src_last_word && dst_first_word !=
            // dst_last_word && src_bit > dst_bit
            // Source offset is larger: each destination word combines the
            // high bits of one source word with the low bits of the next.
            let dst_mask = W::MAX << dst_bit;
            let src_mask = W::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (W::BITS - shift);

            let mut word = source[src_first_word + 1] >> shift;

            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (W::BITS - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }

            word |= source[src_last_word] << (W::BITS - shift);

            // Bits remaining in the last (partial) word.
            let residual =
                bit_len - (W::BITS - dst_bit) - (dst_last_word - dst_first_word - 1) * W::BITS;
            let mask = W::MAX >> (W::BITS - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }
717

718
    /// This implementation keeps a buffer of `W::BITS` bits for reading and
    /// writing, obtaining a significant speedup with respect to the default
    /// implementation.
    ///
    /// Two code paths are used: a fast one for bit widths that are powers of
    /// two (values never straddle a word boundary) and a general one that
    /// carries values across word boundaries.
    #[inline]
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
    where
        F: FnMut(W) -> W,
        Self: BitFieldSlice<W>,
    {
        // Nothing to do on an empty vector.
        if self.is_empty() {
            return;
        }
        let bit_width = self.bit_width();
        // With zero-width values every element is zero and f(0) cannot be
        // stored anyway, so there is nothing to apply.
        if bit_width == 0 {
            return;
        }
        let mask = self.mask();
        let number_of_words: usize = self.bits.as_ref().len();
        let last_word_idx = number_of_words.saturating_sub(1);

        // One-word read buffer and one-word write buffer: we always read one
        // word ahead of the word we are writing back.
        let mut write_buffer: W = W::ZERO;
        let mut read_buffer: W = *self.bits.as_ref().get_unchecked(0);

        // specialized case because it's much faster
        if bit_width.is_power_of_two() {
            let mut bits_in_buffer = 0;

            // TODO!: figure out how to simplify
            // Number of valid bits in the last backend word (W::BITS if the
            // last word is completely filled).
            let mut buffer_limit = (self.len() * bit_width) % W::BITS;
            if buffer_limit == 0 {
                buffer_limit = W::BITS;
            }

            for read_idx in 1..number_of_words {
                // pre-load the next word so it loads while we parse the buffer
                let next_word: W = *self.bits.as_ref().get_unchecked(read_idx);

                // parse as much as we can from the buffer
                loop {
                    let next_bits_in_buffer = bits_in_buffer + bit_width;

                    // Since bit_width divides W::BITS here, this triggers
                    // exactly when the buffer has been fully consumed.
                    if next_bits_in_buffer > W::BITS {
                        break;
                    }

                    let value = read_buffer & mask;
                    // throw away the bits we just read
                    read_buffer >>= bit_width;
                    // apply user func
                    let new_value = f(value);
                    // put the new value in the write buffer
                    write_buffer |= new_value << bits_in_buffer;

                    bits_in_buffer = next_bits_in_buffer;
                }

                // The whole word must have been consumed (power-of-two width).
                invariant_eq!(read_buffer, W::ZERO);
                *self.bits.as_mut().get_unchecked_mut(read_idx - 1) = write_buffer;
                read_buffer = next_word;
                write_buffer = W::ZERO;
                bits_in_buffer = 0;
            }

            // write the last word if we have some bits left
            while bits_in_buffer < buffer_limit {
                let value = read_buffer & mask;
                // throw away the bits we just read
                read_buffer >>= bit_width;
                // apply user func
                let new_value = f(value);
                // put the new value in the write buffer
                write_buffer |= new_value << bits_in_buffer;
                // -= bit_width but with no casts
                bits_in_buffer += bit_width;
            }

            *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
            return;
        }

        // The position inside the word. In most parametrizations of the
        // vector, since the bit_width is not necessarily an integer
        // divisor of the word size, we need to keep track of the position
        // inside the word. As we scroll through the bits, due to the bits
        // remainder, we may need to operate on two words at the same time.
        let mut global_bit_index: usize = 0;

        // Bit boundaries of the word currently buffered in read_buffer.
        let mut lower_word_limit = 0;
        let mut upper_word_limit = W::BITS;

        // We iterate across the words
        for word_number in 0..last_word_idx {
            // We iterate across the elements entirely contained in the word.
            while global_bit_index + bit_width <= upper_word_limit {
                // We retrieve the value from the current word.
                let offset = global_bit_index - lower_word_limit;
                global_bit_index += bit_width;
                let element = self.mask() & (read_buffer >> offset);

                // We apply the function to the element.
                let new_element = f(element);

                // We set the element in the new word.
                write_buffer |= new_element << offset;
            }

            // We retrieve the next word from the bitvec.
            let next_word = *self.bits.as_ref().get_unchecked(word_number + 1);

            let mut new_write_buffer = W::ZERO;
            // If there are leftover bits, one element straddles the boundary
            // between the current word and the next one.
            if upper_word_limit != global_bit_index {
                let remainder = upper_word_limit - global_bit_index;
                let offset = global_bit_index - lower_word_limit;
                // We compose the element from the remaining elements in the
                // current word and the elements in the next word.
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask();
                global_bit_index += bit_width;

                // We apply the function to the element.
                let new_element = f(element);

                // Low part goes in the current output word, high part becomes
                // the start of the next output word.
                write_buffer |= new_element << offset;

                new_write_buffer = new_element >> remainder;
            };

            read_buffer = next_word;

            *self.bits.as_mut().get_unchecked_mut(word_number) = write_buffer;

            write_buffer = new_write_buffer;
            lower_word_limit = upper_word_limit;
            upper_word_limit += W::BITS;
        }

        let mut offset = global_bit_index - lower_word_limit;

        // We iterate across the elements remaining in the last word.
        while offset < self.len() * bit_width - global_bit_index {
            // We retrieve the value from the current word.
            let element = self.mask() & (read_buffer >> offset);

            // We apply the function to the element.
            let new_element = f(element);

            // We set the element in the new word.
            write_buffer |= new_element << offset;
            offset += bit_width;
        }

        *self.bits.as_mut().get_unchecked_mut(last_word_idx) = write_buffer;
    }
871

872
    /// The iterator of mutable chunks returned by
    /// [`try_chunks_mut`](Self::try_chunks_mut).
    type ChunksMut<'a>
        = ChunksMut<'a, W>
    where
        Self: 'a;
876

877
    /// # Errors
    ///
    /// This method will return an error if the chunk size multiplied by the
    /// [bit width](BitFieldSliceCore::bit_width) is not a multiple of
    /// `W::BITS` and more than one chunk must be returned.
    fn try_chunks_mut(&mut self, chunk_size: usize) -> Result<Self::ChunksMut<'_>, ()> {
        let len = self.len();
        let bit_width = self.bit_width();
        // A single chunk is always possible; multiple chunks are possible
        // only if every chunk starts at a word boundary.
        if len <= chunk_size || (chunk_size * bit_width) % W::BITS == 0 {
            Ok(ChunksMut {
                remaining: len,
                bit_width: self.bit_width,
                chunk_size,
                // Only the words actually used by the vector are chunked.
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(W::BITS)]
                    .chunks_mut((chunk_size * bit_width).div_ceil(W::BITS)),
            })
        } else {
            Err(())
        }
    }
897

898
    /// Returns the backend of the vector as a mutable slice of `W`.
    fn as_mut_slice(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
901
}
902

903
impl<W: Word> core::iter::Extend<W> for BitFieldVec<W, Vec<W>> {
    /// Adds the values from the iterator to the end of the vector,
    /// pushing them one at a time.
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
        for value in iter {
            self.push(value);
        }
    }
}
911

912
/// Equality between bit-field vectors requires that the word is the same, the
/// bit width is the same, and the content is the same.
impl<W: Word, B: AsRef<[W]>, C: AsRef<[W]>> PartialEq<BitFieldVec<W, C>> for BitFieldVec<W, B> {
    fn eq(&self, other: &BitFieldVec<W, C>) -> bool {
        if self.bit_width() != other.bit_width() {
            return false;
        }
        if self.len() != other.len() {
            return false;
        }
        // Compare all the completely used backend words…
        let bit_len = self.len() * self.bit_width();
        if self.bits.as_ref()[..bit_len / W::BITS] != other.bits.as_ref()[..bit_len / W::BITS] {
            return false;
        }

        // …and then only the used bits of the last, partially used word, if
        // any: shifting the XOR left by W::BITS - residual discards the bits
        // beyond the end of the vector, whose content is unspecified.
        let residual = bit_len % W::BITS;
        residual == 0
            || (self.bits.as_ref()[bit_len / W::BITS] ^ other.bits.as_ref()[bit_len / W::BITS])
                << (W::BITS - residual)
                == W::ZERO
    }
}
934

935
// Equality on bit-field vectors is an equivalence relation, so Eq holds.
impl Eq for BitFieldVec {}
936

937
// Support for unchecked iterators
938

939
/// An [`UncheckedIterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorUncheckedIterator<'a, W, B>
where
    W: Word,
{
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// The index of the backend word the window was last filled from.
    word_index: usize,
    /// A buffer holding the next bits to be returned, in the lowest positions.
    window: W,
    /// The number of valid bits currently in `window`.
    fill: usize,
}
950

951
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorUncheckedIterator<'a, W, B> {
    /// Creates an unchecked iterator positioned at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (fill, word_index);
        let window = if index == vec.len() {
            // The iterator starts exhausted: the window is empty and must
            // never be refilled.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            let bit_offset = index * vec.bit_width;
            let bit_index = bit_offset % W::BITS;

            // Preload the word containing the first value, discarding the
            // bits before the starting position.
            word_index = bit_offset / W::BITS;
            fill = W::BITS - bit_index;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
980

981
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        // Fast path: the next value is entirely contained in the window.
        if self.fill >= bit_width {
            self.fill -= bit_width;
            let res = self.window & self.vec.mask;
            self.window >>= bit_width;
            return res;
        }

        // Slow path: the next value straddles a word boundary. Take the
        // remaining low bits from the window and complete them with the
        // low bits of the next backend word.
        let res = self.window;
        self.word_index += 1;
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
        let res = (res | (self.window << self.fill)) & self.vec.mask;
        // `used` bits of the freshly loaded word were consumed by this value.
        let used = bit_width - self.fill;
        self.window >>= used;
        self.fill = W::BITS - used;
        res
    }
}
1005

1006
impl<'a, W: Word, B: AsRef<[W]>> IntoUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoUncheckedIter = BitFieldVectorUncheckedIterator<'a, W, B>;
    /// Returns an unchecked iterator over the vector starting at `from`.
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVectorUncheckedIterator::new(self, from)
    }
}
1013

1014
/// An [`UncheckedIterator`] moving backwards over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVectorReverseUncheckedIterator<'a, W: Word, B> {
    /// The vector being iterated over.
    vec: &'a BitFieldVec<W, B>,
    /// The index of the backend word the window was last filled from.
    word_index: usize,
    /// A buffer holding the next bits to be returned, in the highest positions.
    window: W,
    /// The number of valid bits currently in `window`.
    fill: usize,
}
1022

1023
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVectorReverseUncheckedIterator<'a, W, B> {
    /// Creates a backward unchecked iterator positioned just after `index`,
    /// that is, the first value returned is the one at `index - 1`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<W, B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let (word_index, fill);

        let window = if index == 0 {
            // The iterator starts exhausted: the window is empty and must
            // never be refilled.
            word_index = 0;
            fill = 0;
            W::ZERO
        } else {
            // We have to handle the case of zero bit width
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
            let bit_index = bit_offset % W::BITS;

            // Preload the word containing the last bit of the previous value,
            // pushing its used bits into the highest positions.
            word_index = bit_offset / W::BITS;
            fill = bit_index + 1;
            unsafe {
                // SAFETY: index has been checked at the start and it is within bounds
                *vec.bits.as_ref().get_unchecked(word_index) << (W::BITS - fill)
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
1054

1055
impl<W: Word, B: AsRef<[W]>> crate::traits::UncheckedIterator
    for BitFieldVectorReverseUncheckedIterator<'_, W, B>
{
    type Item = W;
    unsafe fn next_unchecked(&mut self) -> W {
        let bit_width = self.vec.bit_width;

        // Fast path: the next value is entirely contained in the window.
        if self.fill >= bit_width {
            self.fill -= bit_width;
            // Rotate the highest bit_width bits of the window into the
            // lowest positions and mask them out.
            self.window = self.window.rotate_left(bit_width as u32);
            return self.window & self.vec.mask;
        }

        // Slow path: the next value straddles a word boundary. Take the
        // remaining high bits from the window and complete them with the
        // high bits of the previous backend word.
        let mut res = self.window.rotate_left(self.fill as u32);
        self.word_index -= 1;
        self.window = *self.vec.bits.as_ref().get_unchecked(self.word_index);
        // `used` bits of the freshly loaded word are consumed by this value.
        let used = bit_width - self.fill;
        res = ((res << used) | (self.window >> (W::BITS - used))) & self.vec.mask;
        self.window <<= used;
        self.fill = W::BITS - used;
        res
    }
}
1078

1079
impl<'a, W: Word, B: AsRef<[W]>> IntoReverseUncheckedIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoRevUncheckedIter = BitFieldVectorReverseUncheckedIterator<'a, W, B>;

    /// Returns a backward unchecked iterator starting from the end of the vector.
    fn into_rev_unchecked_iter(self) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, self.len())
    }

    /// Returns a backward unchecked iterator starting just after position `from`.
    fn into_rev_unchecked_iter_from(self, from: usize) -> Self::IntoRevUncheckedIter {
        BitFieldVectorReverseUncheckedIterator::new(self, from)
    }
}
1091

1092
/// An [`Iterator`] over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitFieldVecIterator<'a, W, B>
where
    W: Word,
{
    /// The underlying unchecked iterator, positioned at `range.start`.
    unchecked: BitFieldVectorUncheckedIterator<'a, W, B>,
    /// The range of indices still to be returned.
    range: core::ops::Range<usize>,
}
1101

1102
impl<'a, W: Word, B: AsRef<[W]>> BitFieldVecIterator<'a, W, B> {
1103
    fn new(vec: &'a BitFieldVec<W, B>, from: usize) -> Self {
313,210✔
1104
        let len = vec.len();
939,630✔
1105
        if from > len {
313,210✔
1106
            panic!(
×
1107
                "Start index out of bounds: {} > 
×
1108
            {}",
×
1109
                from, len
×
1110
            );
1111
        }
1112
        Self {
1113
            unchecked: BitFieldVectorUncheckedIterator::new(vec, from),
×
1114
            range: from..len,
×
1115
        }
1116
    }
1117
}
1118

1119
impl<W: Word, B: AsRef<[W]>> Iterator for BitFieldVecIterator<'_, W, B> {
    type Item = W;

    fn next(&mut self) -> Option<Self::Item> {
        // Advancing the index range both performs the bounds check and
        // moves `range.start` forward; `None` means we are exhausted.
        self.range.next()?;
        // SAFETY: the range guarantees the current position is in bounds.
        Some(unsafe { self.unchecked.next_unchecked() })
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The number of remaining elements is known exactly.
        let remaining = self.range.len();
        (remaining, Some(remaining))
    }
}
1136

1137
impl<W: Word, B: AsRef<[W]>> ExactSizeIterator for BitFieldVecIterator<'_, W, B> {
    /// Returns the number of values still to be returned.
    #[inline(always)]
    fn len(&self) -> usize {
        self.range.len()
    }
}
1143

1144
/// This implements iteration from the end, but its slower than the forward iteration
1145
/// as here we do a random access, while in the forward iterator we do a sequential access
1146
/// and we keep a buffer of `W::BITS` bits to speed up the iteration.
1147
///
1148
/// If needed we could also keep a buffer from the end, but the logic would be more complex
1149
/// and potentially slower.
1150
impl<W: Word, B: AsRef<[W]>> DoubleEndedIterator for BitFieldVecIterator<'_, W, B> {
1151
    fn next_back(&mut self) -> Option<Self::Item> {
×
1152
        if self.range.is_empty() {
×
1153
            return None;
×
1154
        }
1155
        // SAFETY: index has just been checked.
1156
        let res = unsafe { self.unchecked.next_unchecked() };
×
1157
        self.range.end -= 1;
×
1158
        Some(res)
×
1159
    }
1160
}
1161

1162
impl<'a, W: Word, B: AsRef<[W]>> IntoIterator for &'a BitFieldVec<W, B> {
    type Item = W;
    type IntoIter = BitFieldVecIterator<'a, W, B>;

    /// Returns an iterator over all the values of the vector.
    fn into_iter(self) -> Self::IntoIter {
        BitFieldVecIterator::new(self, 0)
    }
}
1170

1171
impl<'a, W: Word, B: AsRef<[W]>> IntoIteratorFrom for &'a BitFieldVec<W, B> {
    type IntoIterFrom = BitFieldVecIterator<'a, W, B>;

    /// Returns an iterator over the values of the vector starting at `from`.
    fn into_iter_from(self, from: usize) -> Self::IntoIter {
        BitFieldVecIterator::new(self, from)
    }
}
1178

1179
impl<W: Word, B: AsRef<[W]>> BitFieldVec<W, B> {
    /// Returns an iterator over the values of the vector starting at `from`.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector.
    pub fn iter_from(&self, from: usize) -> BitFieldVecIterator<'_, W, B> {
        BitFieldVecIterator::new(self, from)
    }

    /// Returns an iterator over all the values of the vector.
    pub fn iter(&self) -> BitFieldVecIterator<'_, W, B> {
        self.iter_from(0)
    }
}
1188
/// A tentatively thread-safe vector of bit fields of fixed width.
///
/// This implementation provides some concurrency guarantees, albeit not
/// full-fledged thread safety: more precisely, we can guarantee thread-safety
/// if the bit width is a power of two; otherwise, concurrent writes to values
/// that cross word boundaries might end up in different threads succeeding in
/// writing only part of a value. If the user can guarantee that no two threads
/// ever write to the same boundary-crossing value, then no race condition can
/// happen.
///
/// Note that the trait
/// [`AtomicHelper`](crate::traits::bit_field_slice::AtomicHelper) can be used
/// to provide a more convenient naming for some methods.

#[derive(Epserde, Debug, Clone, Hash, MemDbg, MemSize)]
pub struct AtomicBitFieldVec<W: Word + IntoAtomic = usize, B = Vec<<W as IntoAtomic>::AtomicType>> {
    /// The underlying storage.
    bits: B,
    /// The bit width of the values stored in the vector.
    bit_width: usize,
    /// A mask with its lowest `bit_width` bits set to one.
    mask: W,
    /// The length of the vector.
    len: usize,
}
1213

1214
impl<W: Word + IntoAtomic, B> AtomicBitFieldVec<W, B> {
    /// Creates a vector from its raw parts without checking invariants.
    ///
    /// # Safety
    /// `len` * `bit_width` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            // The mask is recomputed from the bit width.
            mask: mask(bit_width),
            len,
        }
    }

    /// Decomposes the vector into its backend, bit width, and length.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }

    /// Returns the mask used to extract values from the vector.
    /// This will keep the lowest `bit_width` bits.
    pub fn mask(&self) -> W {
        self.mask
    }
}
1239

1240
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B> {
    /// Returns the backend of the `AtomicBitFieldVec` as a slice of `A`, where `A` is the
    /// atomic variant of `W`.
    pub fn as_slice(&self) -> &[W::AtomicType] {
        self.bits.as_ref()
    }
}
1247

1248
impl<W: Word + IntoAtomic> AtomicBitFieldVec<W>
1249
where
1250
    W::AtomicType: AtomicUnsignedInt,
1251
{
1252
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<W> {
491✔
1253
        // we need at least two words to avoid branches in the gets
1254
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS));
1,964✔
1255
        AtomicBitFieldVec::<W> {
1256
            bits: (0..n_of_words)
491✔
1257
                .map(|_| W::AtomicType::new(W::ZERO))
1258
                .collect(),
1259
            bit_width,
1260
            mask: mask(bit_width),
982✔
1261
            len,
1262
        }
1263
    }
1264
}
1265

1266
impl<W: Word + IntoAtomic, B: AsRef<[W::AtomicType]>> AtomicBitFieldVec<W, B>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    /// Writes zeros in all values.
    ///
    /// Deprecated alias: delegates to [`reset_atomic`](Self::reset_atomic).
    #[deprecated(since = "0.4.4", note = "reset is deprecated in favor of reset_atomic")]
    pub fn reset(&mut self, ordering: Ordering) {
        self.reset_atomic(ordering)
    }
}
1276

1277
impl<W: Word + IntoAtomic, B> BitFieldSliceCore<W::AtomicType> for AtomicBitFieldVec<W, B> {
    /// Returns the bit width of the values stored in the vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS);
        self.bit_width
    }

    /// Returns the number of values stored in the vector.
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
1289

1290
impl<W: Word + IntoAtomic, T: AsRef<[W::AtomicType]>> AtomicBitFieldSlice<W>
    for AtomicBitFieldVec<W, T>
where
    W::AtomicType: AtomicUnsignedInt + AsBytes,
{
    #[inline]
    unsafe fn get_atomic_unchecked(&self, index: usize, order: Ordering) -> W {
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // The value is entirely contained in one word.
            (bits.get_unchecked(word_index).load(order) >> bit_index) & self.mask
        } else {
            // The value straddles a word boundary: combine the high bits of
            // the first word with the low bits of the second. Note that the
            // two loads are separate, so a concurrent boundary-crossing
            // write may be observed only partially (see the struct docs).
            ((bits.get_unchecked(word_index).load(order) >> bit_index)
                | (bits.get_unchecked(word_index + 1).load(order) << (W::BITS - bit_index)))
                & self.mask
        }
    }

    // We reimplement set as we have the mask in the structure.

    /// Sets the element of the slice at the specified index.
    ///
    /// May panic if the index is not in [0..[len](`BitFieldSliceCore::len`))
    /// or the value does not fit in [`BitFieldSliceCore::bit_width`] bits.
    #[inline(always)]
    fn set_atomic(&self, index: usize, value: W, order: Ordering) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_atomic_unchecked(index, value, order);
        }
    }

    #[inline]
    unsafe fn set_atomic_unchecked(&self, index: usize, value: W, order: Ordering) {
        // NOTE(review): full-width values appear to be excluded here —
        // presumably the boundary-crossing branch below cannot handle
        // bit_width == W::BITS; confirm against the callers.
        debug_assert!(self.bit_width != W::BITS);
        let pos = index * self.bit_width;
        let word_index = pos / W::BITS;
        let bit_index = pos % W::BITS;
        let bits = self.bits.as_ref();

        if bit_index + self.bit_width <= W::BITS {
            // this is consistent
            // The value fits in one word: a standard CAS loop replaces the
            // bit_width bits at bit_index atomically.
            let mut current = bits.get_unchecked(word_index).load(order);
            loop {
                let mut new = current;
                new &= !(self.mask << bit_index);
                new |= value << bit_index;

                match bits
                    .get_unchecked(word_index)
                    .compare_exchange(current, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => current = e,
                }
            }
        } else {
            // The value straddles a word boundary: the two halves are
            // written with two independent CAS loops, so the overall write
            // is not atomic (see the struct docs).
            let mut word = bits.get_unchecked(word_index).load(order);
            // try to wait for the other thread to finish
            fence(Ordering::Acquire);
            loop {
                let mut new = word;
                // Keep the bits below bit_index; everything above belongs
                // to this value, which fills the word up to W::BITS here.
                new &= (W::ONE << bit_index) - W::ONE;
                new |= value << bit_index;

                match bits
                    .get_unchecked(word_index)
                    .compare_exchange(word, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => word = e,
                }
            }
            fence(Ordering::Release);

            // ensures that the compiler does not reorder the two atomic operations
            // this should increase the probability of having consistency
            // between two concurrent writes as they will both execute the set
            // of the bits in the same order, and the release / acquire fence
            // should try to synchronize the threads as much as possible
            compiler_fence(Ordering::SeqCst);

            let mut word = bits.get_unchecked(word_index + 1).load(order);
            fence(Ordering::Acquire);
            loop {
                let mut new = word;
                // Replace the low W::BITS - bit_index bits with the high
                // part of the value.
                new &= !(self.mask >> (W::BITS - bit_index));
                new |= value >> (W::BITS - bit_index);

                match bits
                    .get_unchecked(word_index + 1)
                    .compare_exchange(word, new, order, order)
                {
                    Ok(_) => break,
                    Err(e) => word = e,
                }
            }
            fence(Ordering::Release);
        }
    }

    fn reset_atomic(&mut self, ordering: Ordering) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        // Zero all fully used words…
        bits[..full_words]
            .iter()
            .for_each(|x| x.store(W::ZERO, ordering));
        // …and then only the used bits of the last, partially used word,
        // preserving the content beyond the end of the vector.
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }

    #[cfg(feature = "rayon")]
    fn par_reset_atomic(&mut self, ordering: Ordering) {
        // Parallel version of reset_atomic: same logic, with the full
        // words zeroed in parallel.
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / W::BITS;
        let residual = bit_len % W::BITS;
        let bits = self.bits.as_ref();
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| x.store(W::ZERO, ordering));
        if residual != 0 {
            bits[full_words].fetch_and(W::MAX << residual, ordering);
        }
    }
}
1423

1424
// Conversions
1425

1426
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Vec<W::AtomicType>>>
1427
    for BitFieldVec<W, Vec<W>>
1428
{
1429
    #[inline]
1430
    fn from(value: AtomicBitFieldVec<W, Vec<W::AtomicType>>) -> Self {
470✔
1431
        BitFieldVec {
1432
            bits: unsafe { transmute_vec::<W::AtomicType, W>(value.bits) },
1,410✔
1433
            len: value.len,
940✔
1434
            bit_width: value.bit_width,
470✔
1435
            mask: value.mask,
470✔
1436
        }
1437
    }
1438
}
1439

1440
impl<W: Word + IntoAtomic> From<AtomicBitFieldVec<W, Box<[W::AtomicType]>>>
1441
    for BitFieldVec<W, Box<[W]>>
1442
{
1443
    #[inline]
1444
    fn from(value: AtomicBitFieldVec<W, Box<[W::AtomicType]>>) -> Self {
1✔
1445
        BitFieldVec {
1446
            bits: unsafe { transmute_boxed_slice::<W::AtomicType, W>(value.bits) },
3✔
1447

1448
            len: value.len,
2✔
1449
            bit_width: value.bit_width,
1✔
1450
            mask: value.mask,
1✔
1451
        }
1452
    }
1453
}
1454

1455
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a [W::AtomicType]>>
1456
    for BitFieldVec<W, &'a [W]>
1457
{
1458
    #[inline]
1459
    fn from(value: AtomicBitFieldVec<W, &'a [W::AtomicType]>) -> Self {
1✔
1460
        BitFieldVec {
1461
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
2✔
1462
            len: value.len,
2✔
1463
            bit_width: value.bit_width,
1✔
1464
            mask: value.mask,
1✔
1465
        }
1466
    }
1467
}
1468

1469
impl<'a, W: Word + IntoAtomic> From<AtomicBitFieldVec<W, &'a mut [W::AtomicType]>>
1470
    for BitFieldVec<W, &'a mut [W]>
1471
{
1472
    #[inline]
1473
    fn from(value: AtomicBitFieldVec<W, &'a mut [W::AtomicType]>) -> Self {
1✔
1474
        BitFieldVec {
1475
            bits: unsafe {
1✔
1476
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
1477
            },
1478
            len: value.len,
2✔
1479
            bit_width: value.bit_width,
1✔
1480
            mask: value.mask,
1✔
1481
        }
1482
    }
1483
}
1484

1485
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Vec<W>>>
1486
    for AtomicBitFieldVec<W, Vec<W::AtomicType>>
1487
{
1488
    #[inline]
1489
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
369✔
1490
        AtomicBitFieldVec {
1491
            bits: unsafe { transmute_vec::<W, W::AtomicType>(value.bits) },
1,107✔
1492
            len: value.len,
738✔
1493
            bit_width: value.bit_width,
369✔
1494
            mask: value.mask,
369✔
1495
        }
1496
    }
1497
}
1498

1499
impl<W: Word + IntoAtomic> From<BitFieldVec<W, Box<[W]>>>
1500
    for AtomicBitFieldVec<W, Box<[W::AtomicType]>>
1501
{
1502
    #[inline]
1503
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1504
        AtomicBitFieldVec {
1505
            bits: unsafe { transmute_boxed_slice::<W, W::AtomicType>(value.bits) },
3✔
1506
            len: value.len,
2✔
1507
            bit_width: value.bit_width,
1✔
1508
            mask: value.mask,
1✔
1509
        }
1510
    }
1511
}
1512

1513
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a [W]>>
1514
    for AtomicBitFieldVec<W, &'a [W::AtomicType]>
1515
{
1516
    #[inline]
1517
    fn from(value: BitFieldVec<W, &'a [W]>) -> Self {
1✔
1518
        AtomicBitFieldVec {
1519
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
2✔
1520
            len: value.len,
2✔
1521
            bit_width: value.bit_width,
1✔
1522
            mask: value.mask,
1✔
1523
        }
1524
    }
1525
}
1526

1527
impl<'a, W: Word + IntoAtomic> From<BitFieldVec<W, &'a mut [W]>>
1528
    for AtomicBitFieldVec<W, &'a mut [W::AtomicType]>
1529
{
1530
    #[inline]
1531
    fn from(value: BitFieldVec<W, &'a mut [W]>) -> Self {
1✔
1532
        AtomicBitFieldVec {
1533
            bits: unsafe {
1✔
1534
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
1535
            },
1536
            len: value.len,
2✔
1537
            bit_width: value.bit_width,
1✔
1538
            mask: value.mask,
1✔
1539
        }
1540
    }
1541
}
1542

1543
impl<W: Word> From<BitFieldVec<W, Vec<W>>> for BitFieldVec<W, Box<[W]>> {
1544
    fn from(value: BitFieldVec<W, Vec<W>>) -> Self {
96✔
1545
        BitFieldVec {
1546
            bits: value.bits.into_boxed_slice(),
288✔
1547
            len: value.len,
192✔
1548
            bit_width: value.bit_width,
96✔
1549
            mask: value.mask,
96✔
1550
        }
1551
    }
1552
}
1553

1554
impl<W: Word> From<BitFieldVec<W, Box<[W]>>> for BitFieldVec<W, Vec<W>> {
1555
    fn from(value: BitFieldVec<W, Box<[W]>>) -> Self {
1✔
1556
        BitFieldVec {
1557
            bits: value.bits.into_vec(),
3✔
1558
            len: value.len,
2✔
1559
            bit_width: value.bit_width,
1✔
1560
            mask: value.mask,
1✔
1561
        }
1562
    }
1563
}
1564

1565
impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValue for BitFieldVec<W, B> {
1566
    type Value = W;
1567

1568
    fn len(&self) -> usize {
×
1569
        <Self as BitFieldSliceCore<W>>::len(self)
×
1570
    }
1571
}
1572

1573
impl<W: Word, B: AsRef<[W]>> value_traits::slices::SliceByValueGet for BitFieldVec<W, B> {
1574
    unsafe fn get_value_unchecked(&self, index: usize) -> Self::Value {
×
1575
        <Self as BitFieldSlice<W>>::get_unchecked(self, index)
×
1576
    }
1577
}
1578

1579
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueSet
1580
    for BitFieldVec<W, B>
1581
{
1582
    unsafe fn set_value_unchecked(&mut self, index: usize, value: Self::Value) {
×
1583
        <Self as BitFieldSliceMut<W>>::set_unchecked(self, index, value);
×
1584
    }
1585
}
1586

1587
impl<W: Word, B: AsRef<[W]> + AsMut<[W]>> value_traits::slices::SliceByValueRepl
    for BitFieldVec<W, B>
{
    /// Writes `value` at `index` and returns the previous element,
    /// delegating to `replace_unchecked`.
    ///
    /// # Safety
    /// `index` must be smaller than the length of the vector, and `value`
    /// presumably must fit the bit width of the vector — same contract as
    /// `replace_unchecked` (defined elsewhere in this file).
    unsafe fn replace_value_unchecked(&mut self, index: usize, value: Self::Value) -> Self::Value {
        self.replace_unchecked(index, value)
    }
}
1594

1595
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a> for BitFieldVec<W, B> {
    // GAT wiring: by-value iteration over a bit-field vector yields
    // elements of the word type `W` via `BitFieldVecIterator`.
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1599

1600
impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue for BitFieldVec<W, B> {
    /// Returns a by-value iterator over all elements, starting at index 0.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(0)
    }
}
1605

1606
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVec<W, B>
{
    // GAT wiring: iteration from an arbitrary start index uses the same
    // iterator type as full iteration.
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1612

1613
impl<W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom for BitFieldVec<W, B> {
    /// Returns a by-value iterator over the elements starting at index `from`.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(from)
    }
}
1621

1622
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    // GAT wiring: subslices iterate with the same iterator type as the
    // underlying bit-field vector.
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1628

1629
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    /// Returns a by-value iterator over the subslice, delegating to the
    /// underlying slice starting at index 0.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}
1636

1637
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImpl<'b, W, B>
{
    // GAT wiring: iteration from an arbitrary start index uses the same
    // iterator type as full iteration.
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1643

1644
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImpl<'a, W, B>
{
    /// Returns a by-value iterator over the subslice starting at index
    /// `from`, delegating to the underlying slice.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
1654

1655
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    // GAT wiring: mutable subslices iterate (read-only, by value) with the
    // same iterator type as the underlying bit-field vector.
    type Item = W;
    type Iter = BitFieldVecIterator<'a, W, B>;
}
1661

1662
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    /// Returns a by-value iterator over the mutable subslice, delegating
    /// to the underlying slice starting at index 0.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(0)
    }
}
1669

1670
impl<'a, 'b, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVecSubsliceImplMut<'b, W, B>
{
    // GAT wiring: iteration from an arbitrary start index uses the same
    // iterator type as full iteration.
    type Item = W;
    type IterFrom = BitFieldVecIterator<'a, W, B>;
}
1676

1677
impl<'a, W: Word, B: AsRef<[W]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImplMut<'a, W, B>
{
    /// Returns a by-value iterator over the mutable subslice starting at
    /// index `from`, delegating to the underlying slice.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
1687

1688
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_with_capacity() {
        // Pushing up to the preallocated number of elements must not
        // cause the backend to reallocate.
        let mut v = BitFieldVec::<usize, _>::with_capacity(10, 100);
        let initial_capacity = v.bits.capacity();
        (0..100).for_each(|_| v.push(0));
        assert_eq!(v.bits.capacity(), initial_capacity);
    }

    /// Reference implementation of `copy`: element-by-element, clamped to
    /// both the source and the destination length.
    fn copy<W: Word, B: AsRef<[W]>, C: AsRef<[W]> + AsMut<[W]>>(
        source: &BitFieldVec<W, B>,
        from: usize,
        dest: &mut BitFieldVec<W, C>,
        to: usize,
        len: usize,
    ) {
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
        for i in 0..len {
            dest.set(to + i, source.get(from + i));
        }
    }

    #[test]
    fn test_copy() {
        /// Compares `BitFieldVec::copy` against the reference
        /// implementation above on vectors of `size` 3-bit elements
        /// filled with the given patterns.
        fn check(
            src_pattern: usize,
            dst_pattern: usize,
            size: usize,
            from: usize,
            to: usize,
            len: usize,
        ) {
            let source = bit_field_vec![3 => src_pattern; size];
            let mut dest_actual = bit_field_vec![3 => dst_pattern; size];
            let mut dest_expected = dest_actual.clone();
            source.copy(from, &mut dest_actual, to, len);
            copy(&source, from, &mut dest_expected, to, len);
            assert_eq!(dest_actual, dest_expected);
        }

        for src_pattern in 0..8 {
            for dst_pattern in 0..8 {
                // The cases below exercise the different branches of
                // BitFieldVec::copy, identified by the comments.
                // if from_first_word == from_last_word && to_first_word == to_last_word
                check(src_pattern, dst_pattern, 100, 1, 2, 10);
                // else if from_first_word == from_last_word
                check(src_pattern, dst_pattern, 100, 1, 20, 10);
                // else if to_first_word == to_last_word
                check(src_pattern, dst_pattern, 100, 20, 1, 10);
                // else if src_bit == dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 1000, 3, 3 + 3 * 128, 40);
                // else if src_bit == dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 1000, 3, 3 + 3 * 128, 61);
                // else if src_bit < dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 1000, 3, 7 + 64 * 3, 40);
                // else if src_bit < dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 1000, 3, 7 + 64 * 3, 40 + 17);
                // else if src_bit > dest_bit (residual = 1)
                check(src_pattern, dst_pattern, 1000, 7, 3 + 64 * 3, 40 + 64);
                // else if src_bit > dest_bit (residual = 64)
                check(src_pattern, dst_pattern, 1000, 7, 3 + 64 * 3, 40 + 21 + 64);
            }
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc