// /src/bits/bit_vec.rs
/*
 * SPDX-FileCopyrightText: 2023 Inria
 * SPDX-FileCopyrightText: 2023 Sebastiano Vigna
 *
 * SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
 */

//! Bit vector implementations.
//!
//! There are two flavors: [`BitVec`], a mutable bit vector, and
//! [`AtomicBitVec`], a mutable, thread-safe bit vector.
//!
//! These flavors depend on a backend, and presently we provide:
//!
//! - `BitVec<Vec<usize>>`: a mutable, growable and resizable bit vector;
//! - `BitVec<AsRef<[usize]>>`: an immutable bit vector, useful for
//!   [ε-serde](epserde) support;
//! - `BitVec<AsRef<[usize]> + AsMut<[usize]>>`: a mutable (but not resizable)
//!   bit vector;
//! - `AtomicBitVec<AsRef<[AtomicUsize]>>`: a thread-safe, mutable (but not
//!   resizable) bit vector.
//!
//! Note that nothing is assumed about the content of the backend outside the
//! bits of the bit vector. Moreover, the content of the backend outside of
//! the bit vector is never modified by the methods of this structure.
//!
//! It is possible to switch between these flavors using [`From`]/[`Into`].
//!
//! # Examples
//!
//! ```rust
//! use sux::bit_vec;
//! use sux::traits::{BitCount, BitLength, NumBits, AddNumBits};
//! use sux::bits::{BitVec, AtomicBitVec};
//! use core::sync::atomic::Ordering;
//!
//! // Convenience macro
//! let b = bit_vec![0, 1, 0, 1, 1, 0, 1, 0];
//! assert_eq!(b.len(), 8);
//! // Not constant time
//! assert_eq!(b.count_ones(), 4);
//! assert_eq!(b[0], false);
//! assert_eq!(b[1], true);
//! assert_eq!(b[2], false);
//!
//! let b: AddNumBits<_> = b.into();
//! // Constant time, but now b is immutable
//! assert_eq!(b.num_ones(), 4);
//!
//! let mut b = BitVec::new(0);
//! b.push(true);
//! b.push(false);
//! b.push(true);
//! assert_eq!(b.len(), 3);
//!
//! // Let's make it atomic
//! let mut a: AtomicBitVec = b.into();
//! a.set(1, true, Ordering::Relaxed);
//! assert!(a.get(0, Ordering::Relaxed));
//!
//! // Back to normal, but with an immutable size
//! let b: BitVec<Vec<usize>> = a.into();
//! let mut b: BitVec<Box<[usize]>> = b.into();
//! b.set(2, false);
//!
//! // If we create an artificially dirty bit vector, everything still works.
//! let ones = [usize::MAX; 2];
//! assert_eq!(unsafe { BitVec::from_raw_parts(ones, 1) }.count_ones(), 1);
//! ```

use common_traits::{IntoAtomic, SelectInWord};
#[allow(unused_imports)] // this is in the std prelude but not in no_std!
use core::borrow::BorrowMut;
use core::fmt;
use epserde::*;
use mem_dbg::*;
#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::{
    ops::Index,
    sync::atomic::{AtomicUsize, Ordering},
};

use crate::{
    traits::rank_sel::*,
    utils::{transmute_boxed_slice, transmute_vec},
};

#[cfg(feature = "rayon")]
use crate::RAYON_MIN_LEN;

const BITS: usize = usize::BITS as usize;

/// Convenient, [`vec!`](vec!)-like macro to initialize bit vectors.
///
/// # Examples
///
/// ```rust
/// use sux::bit_vec;
/// use sux::traits::BitLength;
///
/// // Empty bit vector
/// let b = bit_vec![];
/// assert_eq!(b.len(), 0);
///
/// // 10 bits set to true
/// let b = bit_vec![true; 10];
/// assert_eq!(b.len(), 10);
/// assert_eq!(b.iter().all(|x| x), true);
/// let b = bit_vec![1; 10];
/// assert_eq!(b.len(), 10);
/// assert_eq!(b.iter().all(|x| x), true);
///
/// // 10 bits set to false
/// let b = bit_vec![false; 10];
/// assert_eq!(b.len(), 10);
/// assert_eq!(b.iter().any(|x| x), false);
/// let b = bit_vec![0; 10];
/// assert_eq!(b.len(), 10);
/// assert_eq!(b.iter().any(|x| x), false);
///
/// // Bit list
/// let b = bit_vec![0, 1, 0, 1, 0, 0];
/// assert_eq!(b.len(), 6);
/// assert_eq!(b[0], false);
/// assert_eq!(b[1], true);
/// assert_eq!(b[2], false);
/// assert_eq!(b[3], true);
/// assert_eq!(b[4], false);
/// assert_eq!(b[5], false);
/// ```
#[macro_export]
macro_rules! bit_vec {
    () => {
        $crate::bits::BitVec::new(0)
    };
    (false; $n:expr) => {
        $crate::bits::BitVec::new($n)
    };
    (0; $n:expr) => {
        $crate::bits::BitVec::new($n)
    };
    (true; $n:expr) => {
        {
            $crate::bits::BitVec::with_value($n, true)
        }
    };
    (1; $n:expr) => {
        {
            $crate::bits::BitVec::with_value($n, true)
        }
    };
    ($($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitVec::with_capacity([$($x),+].len());
            $( b.push($x != 0); )*
            b
        }
    };
}

macro_rules! panic_if_out_of_bounds {
    ($index: expr, $len: expr) => {
        if $index >= $len {
            panic!("Bit index out of bounds: {} >= {}", $index, $len)
        }
    };
}

#[derive(Epserde, Debug, Clone, MemDbg, MemSize)]
/// A bit vector.
pub struct BitVec<B = Vec<usize>> {
    bits: B,
    len: usize,
}

impl<B> BitVec<B> {
    /// Returns the number of bits in the bit vector.
    ///
    /// This method is equivalent to [`BitLength::len`], but it is provided to
    /// reduce ambiguity in method resolution.
    #[inline(always)]
    pub fn len(&self) -> usize {
        BitLength::len(self)
    }

    /// # Safety
    /// `len` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, len: usize) -> Self {
        Self { bits, len }
    }
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize) {
        (self.bits, self.len)
    }
    #[inline(always)]
    /// Modifies the bit vector in place.
    /// # Safety
    /// This is unsafe because it is the caller's responsibility to ensure
    /// that the length is compatible with the modified bits.
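    ///
    /// # Examples
    ///
    /// A minimal sketch of switching the backend from `Vec<usize>` to
    /// `Box<[usize]>` (it relies only on the API defined in this module):
    ///
    /// ```rust
    /// use sux::bit_vec;
    ///
    /// let b = bit_vec![1, 0, 1];
    /// // SAFETY: the closure does not change the stored bits, so the length stays valid.
    /// let b = unsafe { b.map(|bits| bits.into_boxed_slice()) };
    /// assert_eq!(b.len(), 3);
    /// ```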
    pub unsafe fn map<B2>(self, f: impl FnOnce(B) -> B2) -> BitVec<B2> {
        BitVec {
            bits: f(self.bits),
            len: self.len,
        }
    }
}

impl<B: AsRef<[usize]>> BitVec<B> {
    /// Returns an owned copy of the bit vector.
    pub fn to_owned(&self) -> BitVec {
        BitVec {
            bits: self.bits.as_ref().to_owned(),
            len: self.len,
        }
    }

    /// Returns true if the bit at the given index is set.
    #[inline]
    pub fn get(&self, index: usize) -> bool {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.get_unchecked(index) }
    }

    /// Returns true if the bit at the given index is set, without
    /// bounds checking.
    ///
    /// # Safety
    ///
    /// `index` must be between 0 (included) and [`BitVec::len`] (excluded).
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, index: usize) -> bool {
        let word_index = index / BITS;
        let word = self.bits.as_ref().get_unchecked(word_index);
        (word >> (index % BITS)) & 1 != 0
    }
}

impl<B: AsRef<[usize]> + AsMut<[usize]>> BitVec<B> {
    #[inline]
    pub fn set(&mut self, index: usize, value: bool) {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.set_unchecked(index, value) }
    }

    /// # Safety
    ///
    /// `index` must be between 0 (included) and [`BitVec::len`] (excluded).
    #[inline(always)]
    pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
        let word_index = index / BITS;
        let bit_index = index % BITS;
        let bits = self.bits.as_mut();
        // TODO: no test?
        // For constant values, this should be inlined with no test.
        if value {
            *bits.get_unchecked_mut(word_index) |= 1 << bit_index;
        } else {
            *bits.get_unchecked_mut(word_index) &= !(1 << bit_index);
        }
    }

    /// Sets all bits to the given value.
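    ///
    /// # Examples
    ///
    /// A minimal sketch (using the `bit_vec!` macro defined above):
    ///
    /// ```rust
    /// use sux::bit_vec;
    ///
    /// // 70 bits exercise both the full-word and the residual path.
    /// let mut b = bit_vec![0; 70];
    /// b.fill(true);
    /// assert!(b.iter().all(|x| x));
    /// b.fill(false);
    /// assert!(!b.iter().any(|x| x));
    /// ```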
    pub fn fill(&mut self, value: bool) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_mut();
        let word_value = if value { !0 } else { 0 };
        bits[..full_words].iter_mut().for_each(|x| *x = word_value);
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words] = (bits[full_words] & !mask) | (word_value & mask);
        }
    }

    /// Sets all bits to the given value using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_fill(&mut self, value: bool) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_mut();
        let word_value = if value { !0 } else { 0 };
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| *x = word_value);
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words] = (bits[full_words] & !mask) | (word_value & mask);
        }
    }

    /// Sets all bits to zero.
    pub fn reset(&mut self) {
        self.fill(false);
    }

    /// Sets all bits to zero using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_reset(&mut self) {
        self.par_fill(false);
    }

    /// Flip all bits.
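    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use sux::bit_vec;
    ///
    /// let mut b = bit_vec![0, 1, 0];
    /// b.flip();
    /// assert_eq!(b[0], true);
    /// assert_eq!(b[1], false);
    /// assert_eq!(b[2], true);
    /// ```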
    pub fn flip(&mut self) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_mut();
        bits[..full_words].iter_mut().for_each(|x| *x = !*x);
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words] = (bits[full_words] & !mask) | (!bits[full_words] & mask);
        }
    }

    /// Flip all bits using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_flip(&mut self) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| *x = !*x);
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words] = (bits[full_words] & !mask) | (!bits[full_words] & mask);
        }
    }

    /// A parallel version of [`BitVec::count_ones`].
    #[cfg(feature = "rayon")]
    pub fn par_count_ones(&self) -> usize {
        let full_words = self.len() / BITS;
        let residual = self.len() % BITS;
        let bits = self.bits.as_ref();
        let mut num_ones;
        num_ones = bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .map(|x| x.count_ones() as usize)
            .sum();
        if residual != 0 {
            num_ones += (self.as_ref()[full_words] << (BITS - residual)).count_ones() as usize
        }

        num_ones
    }
}

impl BitVec<Vec<usize>> {
    /// Creates a new bit vector of length `len` initialized to `false`.
    pub fn new(len: usize) -> Self {
        Self::with_value(len, false)
    }

    /// Creates a new bit vector of length `len` initialized to `value`.
    pub fn with_value(len: usize, value: bool) -> Self {
        let n_of_words = len.div_ceil(BITS);
        let extra_bits = (n_of_words * BITS) - len;
        let word_value = if value { !0 } else { 0 };
        let mut bits = vec![word_value; n_of_words];
        if extra_bits > 0 {
            let last_word_value = word_value >> extra_bits;
            bits[n_of_words - 1] = last_word_value;
        }
        Self { bits, len }
    }

    /// Creates a new zero-length bit vector of given capacity.
    ///
    /// Note that the capacity will be rounded up to a multiple of the word
    /// size.
50✔
378
        let n_of_words = capacity.div_ceil(BITS);
150✔
379
        Self {
380
            bits: Vec::with_capacity(n_of_words),
50✔
381
            len: 0,
382
        }
383
    }
384

385
    pub fn capacity(&self) -> usize {
80✔
386
        self.bits.capacity() * BITS
80✔
387
    }
388

389
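    /// Pushes `b` at the end of the bit vector, growing the backend if
    /// necessary.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use sux::bits::BitVec;
    ///
    /// let mut b = BitVec::new(0);
    /// b.push(true);
    /// b.push(false);
    /// assert_eq!(b.len(), 2);
    /// assert_eq!(b.pop(), Some(false));
    /// assert_eq!(b.pop(), Some(true));
    /// assert_eq!(b.pop(), None);
    /// ```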
    pub fn push(&mut self, b: bool) {
        if self.bits.len() * BITS == self.len {
            self.bits.push(0);
        }
        let word_index = self.len / BITS;
        let bit_index = self.len % BITS;
        // Clear bit
        self.bits[word_index] &= !(1 << bit_index);
        // Set bit
        self.bits[word_index] |= (b as usize) << bit_index;
        self.len += 1;
    }

    pub fn pop(&mut self) -> Option<bool> {
        if self.len == 0 {
            return None;
        }
        self.len -= 1;
        let word_index = self.len / BITS;
        let bit_index = self.len % BITS;
        Some((self.bits[word_index] >> bit_index) & 1 != 0)
    }

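    /// Resizes the bit vector to `new_len` bits; new bits, if any, are set to
    /// `value`.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use sux::bit_vec;
    ///
    /// let mut b = bit_vec![1, 1];
    /// b.resize(4, false);
    /// assert_eq!(b.len(), 4);
    /// assert_eq!(b[2], false);
    /// b.resize(1, false);
    /// assert_eq!(b.len(), 1);
    /// ```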
    pub fn resize(&mut self, new_len: usize, value: bool) {
        // TODO: rewrite by word
        if new_len > self.len {
            if new_len > self.bits.len() * BITS {
                self.bits.resize(new_len.div_ceil(BITS), 0);
            }
            for i in self.len..new_len {
                unsafe {
                    self.set_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }
}

impl<B> BitLength for BitVec<B> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}

impl<B: AsRef<[usize]>> BitCount for BitVec<B> {
    fn count_ones(&self) -> usize {
        let full_words = self.len() / BITS;
        let residual = self.len() % BITS;
        let bits = self.bits.as_ref();
        let mut num_ones = bits[..full_words]
            .iter()
            .map(|x| x.count_ones() as usize)
            .sum();
        if residual != 0 {
            num_ones += (self.as_ref()[full_words] << (BITS - residual)).count_ones() as usize
        }
        num_ones
    }
}

impl<B: AsRef<[usize]>> Index<usize> for BitVec<B> {
    type Output = bool;

    fn index(&self, index: usize) -> &Self::Output {
        match self.get(index) {
            false => &false,
            true => &true,
        }
    }
}

impl Extend<bool> for BitVec<Vec<usize>> {
    fn extend<T>(&mut self, i: T)
    where
        T: IntoIterator<Item = bool>,
    {
        for b in i {
            self.push(b);
        }
    }
}

impl FromIterator<bool> for BitVec<Vec<usize>> {
    fn from_iter<T: IntoIterator<Item = bool>>(iter: T) -> Self {
        let mut res = Self::new(0);
        res.extend(iter);
        res
    }
}

impl<B: AsRef<[usize]>> RankHinted<64> for BitVec<B> {
    #[inline(always)]
    unsafe fn rank_hinted(&self, pos: usize, hint_pos: usize, hint_rank: usize) -> usize {
        let bits = self.as_ref();
        let mut rank = hint_rank;
        let mut hint_pos = hint_pos;

        debug_assert!(
            hint_pos < bits.len(),
            "hint_pos: {}, len: {}",
            hint_pos,
            bits.len()
        );

        while (hint_pos + 1) * 64 <= pos {
            rank += bits.get_unchecked(hint_pos).count_ones() as usize;
            hint_pos += 1;
        }

        rank + (bits.get_unchecked(hint_pos) & ((1 << (pos % 64)) - 1)).count_ones() as usize
    }
}

impl<B: AsRef<[usize]>> SelectHinted for BitVec<B> {
    unsafe fn select_hinted(&self, rank: usize, hint_pos: usize, hint_rank: usize) -> usize {
        let mut word_index = hint_pos / BITS;
        let bit_index = hint_pos % BITS;
        let mut residual = rank - hint_rank;
        let mut word = (self.as_ref().get_unchecked(word_index) >> bit_index) << bit_index;
        loop {
            let bit_count = word.count_ones() as usize;
            if residual < bit_count {
                return word_index * BITS + word.select_in_word(residual);
            }
            word_index += 1;
            word = *self.as_ref().get_unchecked(word_index);
            residual -= bit_count;
        }
    }
}

impl<B: AsRef<[usize]>> SelectZeroHinted for BitVec<B> {
    unsafe fn select_zero_hinted(&self, rank: usize, hint_pos: usize, hint_rank: usize) -> usize {
        let mut word_index = hint_pos / BITS;
        let bit_index = hint_pos % BITS;
        let mut residual = rank - hint_rank;
        let mut word = (!*self.as_ref().get_unchecked(word_index) >> bit_index) << bit_index;
        loop {
            let bit_count = word.count_ones() as usize;
            if residual < bit_count {
                return word_index * BITS + word.select_in_word(residual);
            }
            word_index += 1;
            word = !self.as_ref().get_unchecked(word_index);
            residual -= bit_count;
        }
    }
}

impl<B: AsRef<[usize]>, C: AsRef<[usize]>> PartialEq<BitVec<C>> for BitVec<B> {
    fn eq(&self, other: &BitVec<C>) -> bool {
        let len = self.len();
        if len != other.len() {
            return false;
        }

        let full_words = len / BITS;
        if self.as_ref()[..full_words] != other.as_ref()[..full_words] {
            return false;
        }

        let residual = len % BITS;

        residual == 0
            || (self.as_ref()[full_words] ^ other.as_ref()[full_words]) << (BITS - residual) == 0
    }
}

impl Eq for BitVec<Vec<usize>> {}

impl<B: AsRef<[usize]>> fmt::Display for BitVec<B> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "[")?;
        for b in self {
            write!(f, "{:b}", b as usize)?;
        }
        write!(f, "]")?;
        Ok(())
    }
}

// An iterator over the bits of the bit vector as booleans.
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct BitIterator<'a, B> {
    bits: &'a B,
    len: usize,
    next_bit_pos: usize,
}

impl<'a, B: AsRef<[usize]>> IntoIterator for &'a BitVec<B> {
    type IntoIter = BitIterator<'a, B>;
    type Item = bool;

    fn into_iter(self) -> Self::IntoIter {
        BitIterator {
            bits: &self.bits,
            len: self.len,
            next_bit_pos: 0,
        }
    }
}

impl<B: AsRef<[usize]>> Iterator for BitIterator<'_, B> {
    type Item = bool;
    fn next(&mut self) -> Option<bool> {
        if self.next_bit_pos == self.len {
            return None;
        }
        let word_idx = self.next_bit_pos / BITS;
        let bit_idx = self.next_bit_pos % BITS;
        let word = unsafe { *self.bits.as_ref().get_unchecked(word_idx) };
        let bit = (word >> bit_idx) & 1;
        self.next_bit_pos += 1;
        Some(bit != 0)
    }
}

/// An iterator over the positions of the ones in a bit vector.
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct OnesIterator<'a, B> {
    bits: &'a B,
    len: usize,
    word_idx: usize,
    /// This is a usize because BitVec is currently implemented only for `Vec<usize>` and `&[usize]`.
    word: usize,
}

impl<'a, B: AsRef<[usize]>> OnesIterator<'a, B> {
    pub fn new(bits: &'a B, len: usize) -> Self {
        let word = if bits.as_ref().is_empty() {
            0
        } else {
            unsafe { *bits.as_ref().get_unchecked(0) }
        };
        Self {
            bits,
            len,
            word_idx: 0,
            word,
        }
    }
}

impl<B: AsRef<[usize]>> Iterator for OnesIterator<'_, B> {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        // find the next word with ones
        while self.word == 0 {
            self.word_idx += 1;
            if self.word_idx == self.bits.as_ref().len() {
                return None;
            }
            self.word = unsafe { *self.bits.as_ref().get_unchecked(self.word_idx) };
        }
        // find the lowest bit set index in the word
        let bit_idx = self.word.trailing_zeros() as usize;
        // compute the global bit index
        let res = (self.word_idx * BITS) + bit_idx;
        if res >= self.len {
            None
        } else {
            // clear the lowest bit set
            self.word &= self.word - 1;
            Some(res)
        }
    }
}

/// An iterator over the positions of the zeros in a bit vector.
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct ZerosIterator<'a, B> {
    bits: &'a B,
    len: usize,
    word_idx: usize,
    /// This is a usize because BitVec is currently implemented only for `Vec<usize>` and `&[usize]`.
    word: usize,
}

impl<'a, B: AsRef<[usize]>> ZerosIterator<'a, B> {
    pub fn new(bits: &'a B, len: usize) -> Self {
        let word = if bits.as_ref().is_empty() {
            0
        } else {
            unsafe { !*bits.as_ref().get_unchecked(0) }
        };
        Self {
            bits,
            len,
            word_idx: 0,
            word,
        }
    }
}

impl<B: AsRef<[usize]>> Iterator for ZerosIterator<'_, B> {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        // find the next flipped word with zeros
        while self.word == 0 {
            self.word_idx += 1;
            if self.word_idx == self.bits.as_ref().len() {
                return None;
            }
            self.word = unsafe { !*self.bits.as_ref().get_unchecked(self.word_idx) };
        }
        // find the lowest zero bit index in the word
        let bit_idx = self.word.trailing_zeros() as usize;
        // compute the global bit index
        let res = (self.word_idx * BITS) + bit_idx;
        if res >= self.len {
            None
        } else {
            // clear the lowest bit set
            self.word &= self.word - 1;
            Some(res)
        }
    }
}

impl<B: AsRef<[usize]>> BitVec<B> {
    // Returns an iterator over the bits of the bit vector.
    #[inline(always)]
    pub fn iter(&self) -> BitIterator<B> {
        self.into_iter()
    }

    // Returns an iterator over the positions of the ones in this bit vector.
    pub fn iter_ones(&self) -> OnesIterator<B> {
        OnesIterator::new(&self.bits, self.len)
    }

    // Returns an iterator over the positions of the zeros in this bit vector.
    pub fn iter_zeros(&self) -> ZerosIterator<B> {
        ZerosIterator::new(&self.bits, self.len)
    }
}

#[derive(Debug, Clone, MemDbg, MemSize)]
/// A thread-safe bit vector.
pub struct AtomicBitVec<B = Vec<AtomicUsize>> {
    bits: B,
    len: usize,
}

impl<B> AtomicBitVec<B> {
    /// Returns the number of bits in the bit vector.
    ///
    /// This method is equivalent to [`BitLength::len`], but it is provided to
    /// reduce ambiguity in method resolution.
    #[inline(always)]
    pub fn len(&self) -> usize {
        BitLength::len(self)
    }

    /// # Safety
    /// `len` must be between 0 (included) and the number of
    /// bits in `bits` (included).
    #[inline(always)]
    pub unsafe fn from_raw_parts(bits: B, len: usize) -> Self {
        Self { bits, len }
    }
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize) {
        (self.bits, self.len)
    }
}

impl<B: AsRef<[AtomicUsize]>> AtomicBitVec<B> {
    pub fn get(&self, index: usize, ordering: Ordering) -> bool {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.get_unchecked(index, ordering) }
    }

    pub fn set(&self, index: usize, value: bool, ordering: Ordering) {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.set_unchecked(index, value, ordering) }
    }

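    /// Sets the bit at the given index to the given value, returning the
    /// previous value.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use sux::bits::AtomicBitVec;
    /// use core::sync::atomic::Ordering;
    ///
    /// let a = AtomicBitVec::new(4);
    /// assert_eq!(a.swap(2, true, Ordering::Relaxed), false);
    /// assert_eq!(a.swap(2, true, Ordering::Relaxed), true);
    /// assert!(a.get(2, Ordering::Relaxed));
    /// ```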
    pub fn swap(&self, index: usize, value: bool, ordering: Ordering) -> bool {
        panic_if_out_of_bounds!(index, self.len);
        unsafe { self.swap_unchecked(index, value, ordering) }
    }

    unsafe fn get_unchecked(&self, index: usize, ordering: Ordering) -> bool {
        let word_index = index / BITS;
        let bits = self.bits.as_ref();
        let word = bits.get_unchecked(word_index).load(ordering);
        (word >> (index % BITS)) & 1 != 0
    }
    #[inline(always)]
    unsafe fn set_unchecked(&self, index: usize, value: bool, ordering: Ordering) {
        let word_index = index / BITS;
        let bit_index = index % BITS;
        let bits = self.bits.as_ref();

        // For constant values, this should be inlined with no test.
        if value {
            bits.get_unchecked(word_index)
                .fetch_or(1 << bit_index, ordering);
        } else {
            bits.get_unchecked(word_index)
                .fetch_and(!(1 << bit_index), ordering);
        }
    }

    #[inline(always)]
    unsafe fn swap_unchecked(&self, index: usize, value: bool, ordering: Ordering) -> bool {
        let word_index = index / BITS;
        let bit_index = index % BITS;
        let bits = self.bits.as_ref();

        let old_word = if value {
            bits.get_unchecked(word_index)
                .fetch_or(1 << bit_index, ordering)
        } else {
            bits.get_unchecked(word_index)
                .fetch_and(!(1 << bit_index), ordering)
        };

        (old_word >> (bit_index)) & 1 != 0
    }

    /// Sets all bits to the given value.
    pub fn fill(&mut self, value: bool, ordering: Ordering) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_ref();
        let word_value = if value { !0 } else { 0 };
        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        bits[..full_words]
            .iter()
            .for_each(|x| x.store(word_value, ordering));
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words].store(
                (bits[full_words].load(ordering) & !mask) | (word_value & mask),
                ordering,
            );
        }
    }

    /// Sets all bits to the given value using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_fill(&mut self, value: bool, ordering: Ordering) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_ref();
        let word_value = if value { !0 } else { 0 };

        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| x.store(word_value, ordering));
        if residual != 0 {
            let mask = (1 << residual) - 1;
            bits[full_words].store(
                (bits[full_words].load(ordering) & !mask) | (word_value & mask),
                ordering,
            );
        }
    }

    /// Sets all bits to zero.
    pub fn reset(&mut self, ordering: Ordering) {
        self.fill(false, ordering);
    }

    /// Sets all bits to zero using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_reset(&mut self, ordering: Ordering) {
        self.par_fill(false, ordering);
    }

    /// Flip all bits.
    pub fn flip(&mut self, ordering: Ordering) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_ref();
        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        bits[..full_words]
            .iter()
            .for_each(|x| _ = x.fetch_xor(!0, ordering));
        if residual != 0 {
            let mask = (1 << residual) - 1;
            let last_word = bits[full_words].load(ordering);
            bits[full_words].store((last_word & !mask) | (!last_word & mask), ordering);
        }
    }

    /// Flip all bits using a parallel implementation.
    #[cfg(feature = "rayon")]
    pub fn par_flip(&mut self, ordering: Ordering) {
        let full_words = self.len() / BITS;
        let residual = self.len % BITS;
        let bits = self.bits.as_ref();
        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .for_each(|x| _ = x.fetch_xor(!0, ordering));
        if residual != 0 {
            let mask = (1 << residual) - 1;
            let last_word = bits[full_words].load(ordering);
            bits[full_words].store((last_word & !mask) | (!last_word & mask), ordering);
        }
    }

    /// A parallel version of [`BitVec::count_ones`].
    #[cfg(feature = "rayon")]
    pub fn par_count_ones(&self) -> usize {
        let full_words = self.len() / BITS;
        let residual = self.len() % BITS;
        let bits = self.bits.as_ref();
        let mut num_ones;
        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        num_ones = bits[..full_words]
            .par_iter()
            .with_min_len(RAYON_MIN_LEN)
            .map(|x| x.load(Ordering::Relaxed).count_ones() as usize)
            .sum();
        if residual != 0 {
            num_ones += (bits[full_words].load(Ordering::Relaxed) << (BITS - residual)).count_ones()
                as usize
        }
        num_ones
    }
}

impl AtomicBitVec<Vec<AtomicUsize>> {
    /// Creates a new atomic bit vector of length `len` initialized to `false`.
    pub fn new(len: usize) -> Self {
        Self::with_value(len, false)
    }

    /// Creates a new atomic bit vector of length `len` initialized to `value`.
    pub fn with_value(len: usize, value: bool) -> Self {
        let n_of_words = len.div_ceil(BITS);
        let extra_bits = (n_of_words * BITS) - len;
        let word_value = if value { !0 } else { 0 };
        let mut bits = (0..n_of_words)
            .map(|_| AtomicUsize::new(word_value))
            .collect::<Vec<_>>();
        if extra_bits > 0 {
            let last_word_value = word_value >> extra_bits;
            bits[n_of_words - 1] = AtomicUsize::new(last_word_value);
        }
        Self { bits, len }
    }
}

impl<B> BitLength for AtomicBitVec<B> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}

impl<B: AsRef<[AtomicUsize]>> Index<usize> for AtomicBitVec<B> {
    type Output = bool;

    /// Shorthand for [`Self::get`] using [`Ordering::Relaxed`].
    fn index(&self, index: usize) -> &Self::Output {
        match self.get(index, Ordering::Relaxed) {
            false => &false,
            true => &true,
        }
    }
}

impl<B: AsRef<[AtomicUsize]>> BitCount for AtomicBitVec<B> {
    fn count_ones(&self) -> usize {
        let full_words = self.len() / BITS;
        let residual = self.len() % BITS;
        let bits = self.bits.as_ref();
        let mut num_ones;
        // Just to be sure, add a fence to ensure that we will see all the final
        // values
        core::sync::atomic::fence(Ordering::SeqCst);
        num_ones = bits[..full_words]
            .iter()
            .map(|x| x.load(Ordering::Relaxed).count_ones() as usize)
            .sum();
        if residual != 0 {
            num_ones += (bits[full_words].load(Ordering::Relaxed) << (BITS - residual)).count_ones()
                as usize
        }
        num_ones
    }
}

// Conversions

impl<W: IntoAtomic> From<BitVec<Vec<W>>> for AtomicBitVec<Vec<W::AtomicType>> {
    fn from(value: BitVec<Vec<W>>) -> Self {
        AtomicBitVec {
            bits: unsafe { transmute_vec::<W, W::AtomicType>(value.bits) },
            len: value.len,
        }
    }
}

impl<'a, W: IntoAtomic> From<BitVec<&'a [W]>> for AtomicBitVec<&'a [W::AtomicType]> {
    fn from(value: BitVec<&'a [W]>) -> Self {
        AtomicBitVec {
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::AtomicType]>(value.bits) },
            len: value.len,
        }
    }
}

impl<'a, W: IntoAtomic> From<BitVec<&'a mut [W]>> for AtomicBitVec<&'a mut [W::AtomicType]> {
    fn from(value: BitVec<&'a mut [W]>) -> Self {
        AtomicBitVec {
            bits: unsafe {
                core::mem::transmute::<&'a mut [W], &'a mut [W::AtomicType]>(value.bits)
            },
            len: value.len,
        }
    }
}

impl<W: IntoAtomic> From<AtomicBitVec<Vec<W::AtomicType>>> for BitVec<Vec<W>> {
    fn from(value: AtomicBitVec<Vec<W::AtomicType>>) -> Self {
        BitVec {
            bits: unsafe { transmute_vec::<W::AtomicType, W>(value.bits) },
            len: value.len,
        }
    }
}

impl<W: IntoAtomic> From<AtomicBitVec<Box<[W::AtomicType]>>> for BitVec<Box<[W]>> {
    fn from(value: AtomicBitVec<Box<[W::AtomicType]>>) -> Self {
        BitVec {
            bits: unsafe { transmute_boxed_slice::<W::AtomicType, W>(value.bits) },
            len: value.len,
        }
    }
}

impl<W: IntoAtomic> From<BitVec<Box<[W]>>> for AtomicBitVec<Box<[W::AtomicType]>> {
    fn from(value: BitVec<Box<[W]>>) -> Self {
        AtomicBitVec {
            bits: unsafe { transmute_boxed_slice::<W, W::AtomicType>(value.bits) },
            len: value.len,
        }
    }
}

impl<'a, W: IntoAtomic> From<AtomicBitVec<&'a [W::AtomicType]>> for BitVec<&'a [W]> {
    fn from(value: AtomicBitVec<&'a [W::AtomicType]>) -> Self {
        BitVec {
            bits: unsafe { core::mem::transmute::<&'a [W::AtomicType], &'a [W]>(value.bits) },
            len: value.len,
        }
    }
}

impl<'a, W: IntoAtomic> From<AtomicBitVec<&'a mut [W::AtomicType]>> for BitVec<&'a mut [W]> {
    fn from(value: AtomicBitVec<&'a mut [W::AtomicType]>) -> Self {
        BitVec {
            bits: unsafe {
                core::mem::transmute::<&'a mut [W::AtomicType], &'a mut [W]>(value.bits)
            },
            len: value.len,
        }
    }
}

impl<W> From<BitVec<Vec<W>>> for BitVec<Box<[W]>> {
    fn from(value: BitVec<Vec<W>>) -> Self {
        BitVec {
            bits: value.bits.into_boxed_slice(),
            len: value.len,
        }
    }
}

impl<W> From<BitVec<Box<[W]>>> for BitVec<Vec<W>> {
    fn from(value: BitVec<Box<[W]>>) -> Self {
        BitVec {
            bits: value.bits.into_vec(),
            len: value.len,
        }
    }
}

impl<W, B: AsRef<[W]>> AsRef<[W]> for BitVec<B> {
    #[inline(always)]
    fn as_ref(&self) -> &[W] {
        self.bits.as_ref()
    }
}

impl<W, B: AsMut<[W]>> AsMut<[W]> for BitVec<B> {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
}

impl<W, B: AsRef<[W]>> AsRef<[W]> for AtomicBitVec<B> {
    #[inline(always)]
    fn as_ref(&self) -> &[W] {
        self.bits.as_ref()
    }
}
// An iterator over the bits of this atomic bit vector as booleans.
#[derive(Debug, MemDbg, MemSize)]
pub struct AtomicBitIterator<'a, B> {
    bits: &'a mut B,
    len: usize,
    next_bit_pos: usize,
}

// We implement [`IntoIterator`] for a mutable reference so no
// outstanding references are allowed while iterating.
impl<'a, B: AsRef<[AtomicUsize]>> IntoIterator for &'a mut AtomicBitVec<B> {
    type IntoIter = AtomicBitIterator<'a, B>;
    type Item = bool;

    fn into_iter(self) -> Self::IntoIter {
        AtomicBitIterator {
            bits: &mut self.bits,
            len: self.len,
            next_bit_pos: 0,
        }
    }
}

impl<B: AsRef<[AtomicUsize]>> Iterator for AtomicBitIterator<'_, B> {
    type Item = bool;
    fn next(&mut self) -> Option<bool> {
        if self.next_bit_pos == self.len {
            return None;
        }
        let word_idx = self.next_bit_pos / BITS;
        let bit_idx = self.next_bit_pos % BITS;
        let word = unsafe {
            self.bits
                .as_ref()
                .get_unchecked(word_idx)
                .load(Ordering::Relaxed)
        };
        let bit = (word >> bit_idx) & 1;
        self.next_bit_pos += 1;
        Some(bit != 0)
    }
}

impl<B: AsRef<[AtomicUsize]>> AtomicBitVec<B> {
    // Returns an iterator over the bits of the bit vector.
    //
    // Note that this method takes a mutable reference to the bit vector,
    // so no outstanding references are allowed while iterating.
    #[inline(always)]
    pub fn iter(&mut self) -> AtomicBitIterator<B> {
        self.into_iter()
    }
}