
randombit / botan / 20160266155

12 Dec 2025 08:01AM UTC · coverage: 90.357% (-0.001% from 90.358%)
push · github · web-flow

Merge pull request #5172 from randombit/jack/fix-clang-tidy-misc-const-correctness
Fix and enable clang-tidy warning misc-const-correctness

100951 of 111725 relevant lines covered (90.36%)
12817908.54 hits per line

Source File: /src/lib/utils/bitvector/bitvector.h (98.35% covered)
/*
 * An abstraction for an arbitrarily large bitvector that can
 * optionally use the secure_allocator. All bitwise accesses and all
 * constructors are implemented in constant time. Otherwise, only methods
 * with the "ct_" prefix run in constant time.
 *
 * (C) 2023-2024 Jack Lloyd
 * (C) 2023-2024 René Meusel, Rohde & Schwarz Cybersecurity
 *
 * Botan is released under the Simplified BSD License (see license.txt)
 */

#ifndef BOTAN_BIT_VECTOR_H_
#define BOTAN_BIT_VECTOR_H_

#include <botan/concepts.h>
#include <botan/exceptn.h>
#include <botan/mem_ops.h>
#include <botan/secmem.h>
#include <botan/strong_type.h>
#include <botan/internal/bit_ops.h>
#include <botan/internal/ct_utils.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/stl_util.h>

#include <memory>
#include <optional>
#include <span>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

namespace Botan {

template <template <typename> typename AllocatorT>
class bitvector_base;

template <typename T>
struct is_bitvector : std::false_type {};

template <template <typename> typename T>
struct is_bitvector<bitvector_base<T>> : std::true_type {};

template <typename T>
constexpr static bool is_bitvector_v = is_bitvector<T>::value;

template <typename T>
concept bitvectorish = is_bitvector_v<strong_type_wrapped_type<T>>;

namespace detail {

template <typename T0, typename... Ts>
struct first_type {
      using type = T0;
};

// get the first type from a parameter pack
// TODO: C++26 will bring Parameter Pack indexing:
//       using first_t = Ts...[0];
template <typename... Ts>
   requires(sizeof...(Ts) > 0)
using first_t = typename first_type<Ts...>::type;

// get the first object from a parameter pack
// TODO: C++26 will bring Parameter Pack indexing:
//       auto first = s...[0];
template <typename T0, typename... Ts>
constexpr static first_t<T0, Ts...> first(T0&& t, Ts&&... /*rest*/) {
   return std::forward<T0>(t);
}

template <typename OutT, typename>
using as = OutT;

template <typename FnT, std::unsigned_integral BlockT, typename... ParamTs>
using blockwise_processing_callback_return_type = std::invoke_result_t<FnT, as<BlockT, ParamTs>...>;

template <typename FnT, typename BlockT, typename... ParamTs>
concept is_blockwise_processing_callback_return_type =
   std::unsigned_integral<BlockT> &&
   (std::same_as<BlockT, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
    std::same_as<void, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>>);

template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback_without_mask =
   is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs...>;

template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback_with_mask =
   is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs..., uint8_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs..., uint16_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., uint32_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs..., uint64_t /* mask */>;

/**
 * Defines the callback constraints for the BitRangeOperator. For further
 * details, see bitvector_base::range_operation().
 */
template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback = blockwise_processing_callback_with_mask<FnT, ParamTs...> ||
                                        blockwise_processing_callback_without_mask<FnT, ParamTs...>;

template <typename FnT, typename... ParamTs>
concept manipulating_blockwise_processing_callback =
   (blockwise_processing_callback_without_mask<FnT, ParamTs...> &&
    std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
   (blockwise_processing_callback_with_mask<FnT, ParamTs...> &&
    std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);

template <typename FnT, typename... ParamTs>
concept predicate_blockwise_processing_callback =
   (blockwise_processing_callback_without_mask<FnT, ParamTs...> &&
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
   (blockwise_processing_callback_with_mask<FnT, ParamTs...> &&
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);

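// Editorial example (not part of the original header): a generic lambda like
// the one below satisfies blockwise_processing_callback for a single
// bitvector parameter, since it is invocable with uint8_t/16/32/64 blocks.
// Because it returns a block of the same width, it also models the
// "manipulating" flavor; returning bool instead would make it a "predicate"
// that can short-circuit the iteration (see range_operation() below).
//
//    auto invert = [](std::unsigned_integral auto block) -> decltype(block) {
//       return static_cast<decltype(block)>(~block);
//    };
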
template <typename T>
class bitvector_iterator {
   private:
      using size_type = typename T::size_type;

   public:
      using difference_type = std::make_signed_t<size_type>;
      using value_type = std::remove_const_t<decltype(std::declval<T>().at(0))>;
      using pointer = value_type*;
      using reference = value_type&;

      // TODO: technically, this could be a random access iterator
      using iterator_category = std::bidirectional_iterator_tag;

   public:
      bitvector_iterator() = default;
      ~bitvector_iterator() = default;

      bitvector_iterator(T* bitvector, size_t offset) : m_bitvector(bitvector) { update(offset); }

      bitvector_iterator(const bitvector_iterator& other) noexcept : m_bitvector(other.m_bitvector) {
         update(other.m_offset);
      }

      bitvector_iterator(bitvector_iterator&& other) noexcept : m_bitvector(other.m_bitvector) {
         update(other.m_offset);
      }

      bitvector_iterator& operator=(const bitvector_iterator& other) noexcept {
         if(this != &other) {
            m_bitvector = other.m_bitvector;
            update(other.m_offset);
         }
         return *this;
      }

      bitvector_iterator& operator=(bitvector_iterator&& other) noexcept {
         m_bitvector = other.m_bitvector;
         update(other.m_offset);
         return *this;
      }

      bitvector_iterator& operator++() noexcept {
         update(signed_offset() + 1);
         return *this;
      }

      bitvector_iterator operator++(int) noexcept {
         auto copy = *this;
         update(signed_offset() + 1);
         return copy;
      }

      bitvector_iterator& operator--() noexcept {
         update(signed_offset() - 1);
         return *this;
      }

      bitvector_iterator operator--(int) noexcept {
         auto copy = *this;
         update(signed_offset() - 1);
         return copy;
      }

      std::partial_ordering operator<=>(const bitvector_iterator& other) const noexcept {
         if(m_bitvector == other.m_bitvector) {
            return m_offset <=> other.m_offset;
         } else {
            return std::partial_ordering::unordered;
         }
      }

      bool operator==(const bitvector_iterator& other) const noexcept {
         return m_bitvector == other.m_bitvector && m_offset == other.m_offset;
      }

      reference operator*() const { return m_bitref.value(); }

      pointer operator->() const { return &(m_bitref.value()); }

   private:
      void update(size_type new_offset) {
         m_offset = new_offset;
         if(m_offset < m_bitvector->size()) {
            m_bitref.emplace((*m_bitvector)[m_offset]);
         } else {
            // end() iterator
            m_bitref.reset();
         }
      }

      difference_type signed_offset() const { return static_cast<difference_type>(m_offset); }

   private:
      T* m_bitvector;
      size_type m_offset;
      mutable std::optional<value_type> m_bitref;
};

}  // namespace detail

/**
 * An arbitrarily large bitvector with typical bit manipulation and convenient
 * bitwise access methods. Don't use `bitvector_base` directly, but the type
 * aliases::
 *
 *    * bitvector         - with a standard allocator
 *    * secure_bitvector  - with a secure allocator that auto-scrubs the memory
 */
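// Usage sketch (editorial addition, illustrative only):
//
//    Botan::secure_bitvector bv(16);   // 16 zero bits, secure_allocator-backed
//    bv.set(0).flip(5);                // bit manipulations are chainable
//    if(bv.any_vartime()) {
//       auto bytes = bv.to_bytes();    // renders to a secure_vector<uint8_t>
//    }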
template <template <typename> typename AllocatorT>
class bitvector_base final {
   public:
      using block_type = uint8_t;
      using size_type = size_t;
      using allocator_type = AllocatorT<block_type>;
      using value_type = block_type;
      using iterator = detail::bitvector_iterator<bitvector_base<AllocatorT>>;
      using const_iterator = detail::bitvector_iterator<const bitvector_base<AllocatorT>>;

      static constexpr size_type block_size_bytes = sizeof(block_type);
      static constexpr size_type block_size_bits = block_size_bytes * 8;
      static constexpr bool uses_secure_allocator = std::is_same_v<allocator_type, secure_allocator<block_type>>;

   private:
      template <template <typename> typename FriendAllocatorT>
      friend class bitvector_base;

      static constexpr block_type one = block_type(1);

      static constexpr size_type block_offset_shift = size_type(3) + ceil_log2(block_size_bytes);
      static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;

      static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }

      static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }

   private:
      /**
       * Internal helper to wrap a single bit in the bitvector and provide
       * certain convenience access methods.
       */
      template <typename BlockT>
         requires std::same_as<block_type, std::remove_cv_t<BlockT>>
      class bitref_base {
         private:
            friend class bitvector_base<AllocatorT>;

            constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
                  m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}

         public:
            bitref_base() = delete;
            bitref_base(const bitref_base&) noexcept = default;
            bitref_base(bitref_base&&) noexcept = default;
            bitref_base& operator=(const bitref_base&) = delete;
            bitref_base& operator=(bitref_base&&) = delete;

            ~bitref_base() = default;

         public:
            // NOLINTNEXTLINE(*-explicit-conversions) FIXME
            constexpr operator bool() const noexcept { return is_set(); }

            constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }

            template <std::unsigned_integral T>
            constexpr T as() const noexcept {
               return static_cast<T>(is_set());
            }

            constexpr CT::Choice as_choice() const noexcept {
               return CT::Choice::from_int(static_cast<BlockT>(m_block & m_mask));
            }

         protected:
            BlockT& m_block;  // NOLINT(*-non-private-member-variable*)
            BlockT m_mask;    // NOLINT(*-non-private-member-variable*)
      };

   public:
      /**
       * Wraps a constant reference into the bitvector. Bit can be accessed
       * but not modified.
       */
      template <typename BlockT>
      class bitref final : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;
      };

      /**
       * Wraps a modifiable reference into the bitvector. Bit may be accessed
       * and modified (e.g. flipped or XOR'ed).
       *
       * Constant-time operations are used for the bit manipulations. The
       * location of the bit in the bit vector may be leaked, though.
       */
      template <typename BlockT>
         requires(!std::is_const_v<BlockT>)
      class bitref<BlockT> : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;

            ~bitref() = default;
            bitref(const bitref&) noexcept = default;
            bitref(bitref&&) noexcept = default;

            constexpr bitref& set() noexcept {
               this->m_block |= this->m_mask;
               return *this;
            }

            constexpr bitref& unset() noexcept {
               this->m_block &= ~this->m_mask;
               return *this;
            }

            constexpr bitref& flip() noexcept {
               this->m_block ^= this->m_mask;
               return *this;
            }

            // NOLINTBEGIN

            constexpr bitref& operator=(bool bit) noexcept {
               this->m_block =
                  CT::Mask<BlockT>::expand(bit).select(this->m_mask | this->m_block, this->m_block & ~this->m_mask);
               return *this;
            }

            constexpr bitref& operator=(const bitref& bit) noexcept { return *this = bit.is_set(); }

            constexpr bitref& operator=(bitref&& bit) noexcept { return *this = bit.is_set(); }

            // NOLINTEND

            constexpr bitref& operator&=(bool other) noexcept {
               this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
               return *this;
            }

            constexpr bitref& operator|=(bool other) noexcept {
               this->m_block |= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }

            constexpr bitref& operator^=(bool other) noexcept {
               this->m_block ^= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }
      };

   public:
      bitvector_base() : m_bits(0) {}

      explicit bitvector_base(size_type bits) : m_bits(bits), m_blocks(ceil_toblocks(bits)) {}

      /**
       * Initialize the bitvector from a byte-array. Bits are taken byte-wise
       * from least significant to most significant. Example::
       *
       *    bitvector[0] -> LSB(Byte[0])
       *    bitvector[1] -> LSB+1(Byte[0])
       *    ...
       *    bitvector[8] -> LSB(Byte[1])
       *
       * @param bytes The byte vector to be loaded
       * @param bits  The number of bits to be loaded. This must not be more
       *              than the number of bits in @p bytes.
       */
      bitvector_base(std::span<const uint8_t> bytes, /* NOLINT(*-explicit-conversions) FIXME */
                     std::optional<size_type> bits = std::nullopt) :
            m_bits() {
         from_bytes(bytes, bits);
      }

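      // Editorial example (illustrative only): loading the single byte 0x05
      // (0b0000'0101) yields the bit sequence 1,0,1,0,0,0,0,0:
      //
      //    const uint8_t byte = 0x05;
      //    Botan::secure_bitvector bv(std::span{&byte, 1});
      //    // bv.size() == 8, bv[0] == true, bv[1] == false, bv[2] == true
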
      bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
            m_bits(bits.value_or(blocks.size() * block_size_bits)), m_blocks(blocks.begin(), blocks.end()) {}

      bool empty() const { return m_bits == 0; }

      size_type size() const { return m_bits; }

      /**
       * @returns true iff the number of 1-bits in this is odd, false otherwise (constant time)
       */
      CT::Choice has_odd_hamming_weight() const {
         uint64_t acc = 0;
         full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);

         for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {
            acc ^= acc >> i;
         }

         return CT::Choice::from_int(acc & one);
      }

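      // Editorial note on has_odd_hamming_weight(): XOR-ing all blocks into
      // `acc` preserves the parity of the total popcount, and the shift/XOR
      // cascade (acc ^= acc >> 32; ...; acc ^= acc >> 1) folds the XOR of all
      // 64 bits of `acc` into its least significant bit, i.e. popcount mod 2.
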
      /**
       * Counts the number of 1-bits in the bitvector in constant time.
       * @returns the "population count" (or Hamming weight) of the bitvector
       */
      size_type hamming_weight() const {
         size_type acc = 0;
         full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
         return acc;
      }

      /**
       * @returns a copy of this bitvector as a new bitvector of type @p OutT
       */
      template <bitvectorish OutT>
      OutT as() const {
         return subvector<OutT>(0, size());
      }

      /**
       * @returns true if @p other contains the same bit pattern as this
       */
      template <bitvectorish OtherT>
      bool equals_vartime(const OtherT& other) const noexcept {
         return size() == other.size() &&
                full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
                                     *this,
                                     unwrap_strong_type(other));
      }

      /**
       * @returns true if @p other contains the same bit pattern as this (constant time)
       */
      template <bitvectorish OtherT>
      bool equals(const OtherT& other) const noexcept {
         return (*this ^ other).none();
      }

      /// @name Serialization
      /// @{

      /**
       * Re-initialize the bitvector with the given bytes. See the respective
       * constructor for details. This should be used only when trying to save
       * allocations. Otherwise, use the constructor.
       *
       * @param bytes  the byte range to load bits from
       * @param bits   (optional) if not all @p bytes should be loaded in full
       */
      void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         m_bits = bits.value_or(bytes.size_bytes() * 8);
         BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");
         resize(m_bits);

         // load as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
         }

         // load remaining unaligned data
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
         }
      }

      /**
       * Renders the bitvector into a byte array. By default, this will use
       * `std::vector<uint8_t>` or `Botan::secure_vector<uint8_t>`, depending on
       * the allocator used by the bitvector. The rendering is compatible with
       * the bit layout explained in the respective constructor.
       */
      template <concepts::resizable_byte_buffer OutT =
                   std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>
      OutT to_bytes() const {
         OutT out(ceil_tobytes(m_bits));
         to_bytes(out);
         return out;
      }

      /**
       * Renders the bitvector into a properly sized byte range.
       *
       * @param out  a byte range that has a length of at least `ceil_tobytes(size())`.
       */
      void to_bytes(std::span<uint8_t> out) const {
         const auto bytes_needed = ceil_tobytes(m_bits);
         BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");

         // copy as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
         }

         // copy remaining unaligned data
         clear_mem(out.subspan(verbatim_bytes));
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
         }
      }

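      // Editorial round-trip sketch (illustrative only): to_bytes() inverts
      // the byte-wise loading performed by from_bytes().
      //
      //    const std::vector<uint8_t> in{0xAB, 0xCD};
      //    Botan::secure_bitvector bv(in);   // 16 bits
      //    auto out = bv.to_bytes();         // holds the same bytes as `in`
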
      /**
       * Renders this bitvector into a sequence of "0"s and "1"s.
       * This is meant for debugging purposes and is not efficient.
       */
      std::string to_string() const {
         std::stringstream ss;
         for(size_type i = 0; i < size(); ++i) {
            ss << ref(i);
         }
         return ss.str();
      }

      /// @}

      /// @name Capacity Accessors and Modifiers
      /// @{

      size_type capacity() const { return m_blocks.capacity() * block_size_bits; }

      void reserve(size_type bits) { m_blocks.reserve(ceil_toblocks(bits)); }

      void resize(size_type bits) {
         const auto new_number_of_blocks = ceil_toblocks(bits);
         if(new_number_of_blocks != m_blocks.size()) {
            m_blocks.resize(new_number_of_blocks);
         }

         m_bits = bits;
         zero_unused_bits();
      }

      void push_back(bool bit) {
         const auto i = size();
         resize(i + 1);
         ref(i) = bit;
      }

      void pop_back() {
         if(!empty()) {
            resize(size() - 1);
         }
      }

      /// @}

      /// @name Bitwise and Global Accessors and Modifiers
      /// @{

      auto at(size_type pos) {
         check_offset(pos);
         return ref(pos);
      }

      // TODO C++23: deducing this
      auto at(size_type pos) const {
         check_offset(pos);
         return ref(pos);
      }

      auto front() { return ref(0); }

      // TODO C++23: deducing this
      auto front() const { return ref(0); }

      auto back() { return ref(size() - 1); }

      // TODO C++23: deducing this
      auto back() const { return ref(size() - 1); }

      /**
       * Sets the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& set(size_type pos) {
         check_offset(pos);
         ref(pos).set();
         return *this;
      }

      /**
       * Sets all currently allocated bits.
       */
      bitvector_base& set() {
         full_range_operation(
            [](std::unsigned_integral auto block) -> decltype(block) {
               return static_cast<decltype(block)>(~static_cast<decltype(block)>(0));
            },
            *this);
         zero_unused_bits();
         return *this;
      }

      /**
       * Unsets the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& unset(size_type pos) {
         check_offset(pos);
         ref(pos).unset();
         return *this;
      }

      /**
       * Unsets all currently allocated bits.
       */
      bitvector_base& unset() {
         full_range_operation(
            [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
            *this);
         return *this;
      }

      /**
       * Flips the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& flip(size_type pos) {
         check_offset(pos);
         ref(pos).flip();
         return *this;
      }

      /**
       * Flips all currently allocated bits.
       */
      bitvector_base& flip() {
         full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
         zero_unused_bits();
         return *this;
      }

      /**
       * @returns true iff no bit is set
       */
      bool none_vartime() const {
         return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
      }

      /**
       * @returns true iff no bit is set (evaluated in constant time)
       */
      bool none() const { return hamming_weight() == 0; }

      /**
       * @returns true iff at least one bit is set
       */
      bool any_vartime() const { return !none_vartime(); }

      /**
       * @returns true iff at least one bit is set (evaluated in constant time)
       */
      bool any() const { return !none(); }

      /**
       * @returns true iff all bits are set
       */
      bool all_vartime() const {
         return full_range_operation(
            []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
      }

      /**
       * @returns true iff all bits are set (evaluated in constant time)
       */
      bool all() const { return hamming_weight() == m_bits; }

      auto operator[](size_type pos) { return ref(pos); }

      // TODO C++23: deducing this
      auto operator[](size_type pos) const { return ref(pos); }

      /// @}

      /// @name Subvectors
      /// @{

      /**
       * Creates a new bitvector with a subsection of this bitvector starting
       * at @p pos, copying exactly @p length bits.
       */
      template <bitvectorish OutT = bitvector_base<AllocatorT>>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         const size_type bitlen = length.value_or(size() - pos);
         BOTAN_ARG_CHECK(pos + bitlen <= size(), "Not enough bits to copy");

         OutT newvector(bitlen);

         // Handle bitvectors that are wrapped in strong types
         auto& newvector_unwrapped = unwrap_strong_type(newvector);

         if(bitlen > 0) {
            if(pos % 8 == 0) {
               copy_mem(
                  newvector_unwrapped.m_blocks,
                  std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));
            } else {
               const BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(
                  *this, pos, bitlen);
               const BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(
                  unwrap_strong_type(newvector_unwrapped), 0, bitlen);
               range_operation([](auto /* to */, auto from) { return from; }, to_op, from_op);
            }

            newvector_unwrapped.zero_unused_bits();
         }

         return newvector;
      }

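      // Editorial example (illustrative only): both the byte-aligned fast
      // path and the unaligned BitRangeOperator path yield the same result.
      //
      //    const std::vector<uint8_t> bytes{0xFF, 0x00};
      //    Botan::secure_bitvector bv(bytes);    // bits 0..7 set, 8..15 clear
      //    auto aligned   = bv.subvector(8, 4);  // pos % 8 == 0: copy_mem path
      //    auto unaligned = bv.subvector(4, 4);  // range_operation path
      //    // aligned.none() == true, unaligned.all_vartime() == true
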
      /**
       * Extracts a subvector of bits as an unsigned integral type @p OutT
       * starting from bit @p pos and copying exactly sizeof(OutT)*8 bits.
       *
       * Hint: The bits are in little-endian order, i.e. the least significant
       *       bit is the 0th bit and the most significant bit is the n-th.
       *       Hence, addressing the bits with bitwise operations is done like
       *       so: bool bit = (out_int >> pos) & 1;
       */
      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      OutT subvector(size_type pos) const {
         using result_t = strong_type_wrapped_type<OutT>;
         constexpr size_t bits = sizeof(result_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to copy");
         result_t out = 0;

         if(pos % 8 == 0) {
            out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());
         } else {
            const BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(
               *this, pos, bits);
            range_operation(
               [&](std::unsigned_integral auto integer) {
                  if constexpr(std::same_as<result_t, decltype(integer)>) {
                     out = integer;
                  }
               },
               op);
         }

         return wrap_strong_type<OutT>(out);
      }

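      // Editorial example (illustrative only): extracting a 16-bit window
      // starting at (unaligned) bit 4.
      //
      //    const std::vector<uint8_t> bytes{0xF0, 0x0F, 0x00};
      //    Botan::secure_bitvector bv(bytes);
      //    auto word = bv.subvector<uint16_t>(4);  // == 0x00FF
      //    bool bit = (word >> 0) & 1;             // bitvector bit 4 -> true
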
      /**
       * Replaces a subvector of bits with the bits of an unsigned integral
       * @p value starting at bit @p pos. The number of bits to replace is
       * determined by the width of @p value.
       *
       * @param pos    the position to start replacing bits
       * @param value  the unsigned integral to copy bits from
       */
      template <typename InT>
         requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
      void subvector_replace(size_type pos, InT value) {
         using in_t = strong_type_wrapped_type<InT>;
         constexpr size_t bits = sizeof(in_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to replace");

         if(pos % 8 == 0) {
            store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),
                     unwrap_strong_type(value));
         } else {
            const BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
            range_operation(
               [&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
                  if constexpr(std::same_as<in_t, BlockT>) {
                     return unwrap_strong_type(value);
                  } else {
                     // This should never be reached. BOTAN_ASSERT_UNREACHABLE()
                     // caused warning "unreachable code" on MSVC, though. You
                     // don't say!
                     //
                     // Returning the given block back unchanged is the most
                     // reasonable thing to do in this case.
                     return block;
                  }
               },
               op);
         }
      }

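      // Editorial round-trip sketch (illustrative only):
      //
      //    Botan::secure_bitvector bv(24);
      //    bv.subvector_replace(3, uint16_t(0xBEEF));   // unaligned store path
      //    auto readback = bv.subvector<uint16_t>(3);   // == 0xBEEF
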
      /// @}

      /// @name Operators
      ///
      /// @{

      auto operator~() {
         auto newbv = *this;
         newbv.flip();
         return newbv;
      }

      template <bitvectorish OtherT>
      auto& operator|=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      template <bitvectorish OtherT>
      auto& operator&=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      template <bitvectorish OtherT>
      auto& operator^=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

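      // Editorial example (illustrative only): the compound operators work on
      // whole 64/32/16/8-bit blocks rather than on individual bits.
      //
      //    Botan::secure_bitvector a(64), b(64);
      //    a.set(0);
      //    b.set(0).set(63);
      //    a ^= b;   // a now has only bit 63 set
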
      /// @}

      /// @name Constant Time Operations
      ///
      /// @{

      /**
       * Implements::
       *
       *    if(condition) {
       *       *this ^= other;
       *    }
       *
       * omitting runtime dependence on any of the parameters.
       */
      template <bitvectorish OtherT>
      void ct_conditional_xor(CT::Choice condition, const OtherT& other) {
         BOTAN_ASSERT_NOMSG(m_bits == other.m_bits);
         BOTAN_ASSERT_NOMSG(m_blocks.size() == other.m_blocks.size());

         auto maybe_xor = overloaded{
            [m = CT::Mask<uint64_t>::from_choice(condition)](uint64_t lhs, uint64_t rhs) -> uint64_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint32_t>::from_choice(condition)](uint32_t lhs, uint32_t rhs) -> uint32_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint16_t>::from_choice(condition)](uint16_t lhs, uint16_t rhs) -> uint16_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint8_t>::from_choice(condition)](uint8_t lhs, uint8_t rhs) -> uint8_t {
               return lhs ^ m.if_set_return(rhs);
            },
         };

         full_range_operation(maybe_xor, *this, unwrap_strong_type(other));
      }

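      // Editorial example (assumes CT::Choice::from_int() treats any non-zero
      // value as "set", consistent with its use elsewhere in this header):
      //
      //    Botan::secure_bitvector a(128), b(128);
      //    auto cond = Botan::CT::Choice::from_int(uint8_t(1));
      //    a.ct_conditional_xor(cond, b);   // a ^= b, without branching on cond
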
      constexpr void _const_time_poison() const { CT::poison(m_blocks); }
51✔
883

884
      constexpr void _const_time_unpoison() const { CT::unpoison(m_blocks); }
86✔
885

886
      /// @}
887

888
      /// @name Iterators
889
      ///
890
      /// @{
891

892
      iterator begin() noexcept { return iterator(this, 0); }
1,066✔
893

894
      const_iterator begin() const noexcept { return const_iterator(this, 0); }
10✔
895

896
      const_iterator cbegin() const noexcept { return const_iterator(this, 0); }
10✔
897

898
      iterator end() noexcept { return iterator(this, size()); }
537✔
899

900
      const_iterator end() const noexcept { return const_iterator(this, size()); }
5✔
901

902
      const_iterator cend() noexcept { return const_iterator(this, size()); }
8✔
903

904
      /// @}
905

906
   private:
907
      void check_offset(size_type pos) const {
422,330,531✔
908
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&m_bits, sizeof(m_bits)));
909
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&pos, sizeof(pos)));
910
         BOTAN_ARG_CHECK(pos < m_bits, "Out of range");
422,330,927✔
911
      }
912

913
      void zero_unused_bits() {
519,221✔
914
         const auto first_unused_bit = size();
517,486✔
915

916
         // Zero out any unused bits in the last block
917
         if(first_unused_bit % block_size_bits != 0) {
519,221✔
918
            const block_type mask = (one << block_offset(first_unused_bit)) - one;
343,916✔
919
            m_blocks[block_index(first_unused_bit)] &= mask;
343,916✔
920
         }
921
      }
922

923
      static constexpr size_type ceil_toblocks(size_type bits) {
517,854✔
924
         return (bits + block_size_bits - 1) / block_size_bits;
519,556✔
925
      }
926

927
      auto ref(size_type pos) const { return bitref<const block_type>(m_blocks, pos); }
12,641,743✔
928

929
      auto ref(size_type pos) { return bitref<block_type>(m_blocks, pos); }
1,235,232,102✔
930

931
   private:
932
      enum class BitRangeAlignment : uint8_t { byte_aligned, no_alignment };
933

934
      /**
935
       * Helper construction to implement bit range operations on the bitvector.
936
       * It basically implements an iterator to read and write blocks of bits
937
       * from the underlying bitvector. Where "blocks of bits" are unsigned
938
       * integers of varying bit lengths.
939
       *
940
       * If the iteration starts at a byte boundary in the underlying bitvector,
941
       * this applies certain optimizations (i.e. loading blocks of bits straight
942
       * from the underlying byte buffer). The optimizations are enabled at
943
       * compile time (with the template parameter `alignment`).
944
       */
945
      template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
946
         requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
947
      class BitRangeOperator {
948
         private:
949
            constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }
950

951
            struct UnalignedDataHelper {
952
                  const uint8_t padding_bits;
953
                  const uint8_t bits_to_byte_alignment;
954
            };
955

956
         public:
957
            BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
834,242,086✔
958
                  m_source(source),
834,242,086✔
959
                  m_start_bitoffset(start_bitoffset),
834,242,086✔
960
                  m_bitlength(bitlength),
834,242,086✔
961
                  m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
834,242,086✔
962
                                      .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
834,242,086✔
963
                  m_read_bitpos(start_bitoffset),
834,242,086✔
964
                  m_write_bitpos(start_bitoffset) {
834,242,086✔
965
               BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
834,242,086✔
966
               BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bytes in underlying source");
834,242,086✔
967
            }
834,242,086✔
968

969
            explicit BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}
834,112,077✔
970

971
            static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }
972

973
            /**
974
             * @returns the overall number of bits to be iterated with this operator
975
             */
976
            size_type size() const { return m_bitlength; }
417,059,355✔
977

978
            /**
979
             * @returns the number of bits not yet read from this operator
980
             */
981
            size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }
1,675,493,493✔
982

983
            /**
984
             * @returns the number of bits still to be written into this operator
985
             */
986
            size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }
3,398,317✔
987

988
            /**
989
             * Loads the next block of bits from the underlying bitvector. No
990
             * bounds checks are performed. The caller can define the size of
991
             * the resulting unsigned integer block.
992
             */
993
            template <std::unsigned_integral BlockT>
994
            BlockT load_next() const {
6,762,606✔
995
               constexpr size_type block_size = sizeof(BlockT);
6,762,606✔
996
               constexpr size_type block_bits = block_size * 8;
6,762,606✔
997
               const auto bits_remaining = bits_to_read();
6,762,606✔
998

999
               BlockT result_block = 0;
6,762,606✔
1000
               if constexpr(is_byte_aligned()) {
1001
                  result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
3,382,892✔
1002
               } else {
1003
                  const size_type byte_pos = read_bytepos();
3,379,714✔
1004
                  const size_type bits_to_collect = std::min(block_bits, bits_to_read());
6,759,428✔
1005

1006
                  const uint8_t first_byte = m_source.as_byte_span()[byte_pos];
3,379,714✔
1007

1008
                  // Initialize the left-most bits from the first byte.
1009
                  result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;
3,379,714✔
1010

1011
                  // If more bits are needed, we pull them from the remaining bytes.
1012
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
3,379,714✔
1013
                     const BlockT block =
1014
                        load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
6,682,042✔
1015
                     result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
3,341,024✔
1016
                  }
1017
               }
1018

1019
               m_read_bitpos += std::min(block_bits, bits_remaining);
6,762,606✔
1020
               return result_block;
6,668,117✔
1021
            }
1022

1023
            /**
1024
             * Stores the next block of bits into the underlying bitvector.
1025
             * No bounds checks are performed. Storing bit blocks that are not
1026
             * aligned at a byte-boundary in the underlying bitvector is
1027
             * currently not implemented.
1028
             */
1029
            template <std::unsigned_integral BlockT>
1030
               requires(!is_const())
1031
            void store_next(BlockT block) {
3,372,004✔
1032
               constexpr size_type block_size = sizeof(BlockT);
3,372,004✔
1033
               constexpr size_type block_bits = block_size * 8;
3,372,004✔
1034

1035
               if constexpr(is_byte_aligned()) {
1036
                  auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
3,345,691✔
1037
                  store_le(sink, block);
3,345,691✔
1038
               } else {
1039
                  const size_type byte_pos = write_bytepos();
26,313✔
1040
                  const size_type bits_to_store = std::min(block_bits, bits_to_write());
52,626✔
1041

1042
                  uint8_t& first_byte = m_source.as_byte_span()[byte_pos];
26,313✔
1043

1044
                  // Set the left-most bits in the first byte, leaving all others unchanged
1045
                  first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
26,313✔
1046
                               uint8_t(block << m_unaligned_helper.padding_bits);
26,313✔
1047

1048
                  // If more bits are provided, we store them in the remaining bytes.
1049
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
26,313✔
1050
                     const auto remaining_bytes =
1051
                        m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
26,313✔
1052
                     const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
26,313✔
1053
                     const BlockT new_bytes =
26,313✔
1054
                        (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
26,313✔
1055
                     store_le(remaining_bytes, new_bytes);
26,313✔
1056
                  }
1057
               }
1058

1059
               m_write_bitpos += std::min(block_bits, bits_to_write());
3,372,004✔
1060
            }
3,372,004✔
1061

1062
            template <std::unsigned_integral BlockT>
1063
               requires(is_byte_aligned() && !is_const())
1064
            std::span<BlockT> span(size_type blocks) const {
1,251,061,992✔
1065
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
1,251,061,992✔
1066
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
1,251,061,992✔
1067
               // Intermittently casting to void* to avoid a compiler warning
1068
               void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
1,251,061,992✔
1069
               return {reinterpret_cast<BlockT*>(ptr), blocks};
1,251,061,992✔
1070
            }
1071

1072
            template <std::unsigned_integral BlockT>
1073
               requires(is_byte_aligned() && is_const())
1074
            std::span<const BlockT> span(size_type blocks) const {
1,251,274,192✔
1075
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
1,251,274,192✔
1076
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
1,251,274,192✔
1077
               // Intermittently casting to void* to avoid a compiler warning
1078
               const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
1,251,274,192✔
1079
               return {reinterpret_cast<const BlockT*>(ptr), blocks};
1,251,274,192✔
1080
            }
1081

1082
            void advance(size_type bytes)
1,251,274,225✔
1083
               requires(is_byte_aligned())
1084
            {
1085
               m_read_bitpos += bytes * 8;
1,251,274,225✔
1086
               m_write_bitpos += bytes * 8;
1,251,274,225✔
1087
            }
1088

1089
            template <std::unsigned_integral BlockT>
1090
               requires(is_byte_aligned())
1091
            size_t is_memory_aligned_to() const {
834,112,077✔
1092
               const void* cptr = m_source.as_byte_span().data() + read_bytepos();
834,112,077✔
1093
               const void* ptr_before = cptr;
834,112,077✔
1094

1095
               // std::align takes `ptr` as a reference (!), i.e. `void*&` and
1096
               // uses it as an out-param. Though, `cptr` is const because this
1097
               // method is const-qualified, hence the const_cast<>.
1098
               void* ptr = const_cast<void*>(cptr);  // NOLINT(*-const-correctness)
834,112,077✔
1099
               size_t size = sizeof(BlockT);
834,112,077✔
1100
               return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
1,668,224,154✔
1101
            }
1102

1103
         private:
1104
            size_type read_bytepos() const { return m_read_bitpos / 8; }
840,874,673✔
1105

1106
            size_type write_bytepos() const { return m_write_bitpos / 8; }
3,372,004✔
1107

1108
         private:
1109
            BitvectorT& m_source;
1110
            size_type m_start_bitoffset;
1111
            size_type m_bitlength;
1112

1113
            UnalignedDataHelper m_unaligned_helper;
1114

1115
            mutable size_type m_read_bitpos;
1116
            mutable size_type m_write_bitpos;
1117
      };
1118

1119
      /**
1120
       * Helper struct for the low-level handling of blockwise operations
1121
       *
1122
       * This has two main code paths: Optimized for byte-aligned ranges that
1123
       * can simply be taken from memory as-is. And a generic implementation
1124
       * that must assemble blocks from unaligned bits before processing.
1125
       */
1126
      template <typename FnT, typename... ParamTs>
1127
         requires detail::blockwise_processing_callback<FnT, ParamTs...>
1128
      class blockwise_processing_callback_trait {
1129
         public:
1130
            constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
1131
            constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
1132
            constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
1133
            static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");
1134

1135
            /**
1136
             * Applies @p fn to the blocks provided in @p blocks by simply reading from
1137
             * memory without re-arranging any bits across byte-boundaries.
1138
             */
1139
            template <std::unsigned_integral... BlockTs>
1140
               requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
1141
            constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {
1,251,274,225✔
1142
               constexpr size_type bits = sizeof(detail::first_t<BlockTs...>) * 8;
1,251,274,225✔
1143
               const size_type iterations = detail::first(blocks...).size();
1,251,062,334✔
1144
               for(size_type i = 0; i < iterations; ++i) {
2,147,483,647✔
1145
                  if constexpr(is_predicate) {
1146
                     if(!apply(fn, bits, blocks[i]...)) {
33✔
1147
                        return false;
1148
                     }
1149
                  } else if constexpr(is_manipulator) {
1150
                     detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
2,147,483,647✔
1151
                  } else {
1152
                     apply(fn, bits, blocks[i]...);
5,792,403✔
1153
                  }
1154
               }
1155
               return true;
375✔
1156
            }
1157

1158
            /**
1159
             * Applies @p fn to as many blocks as @p ops provide for the given type.
1160
             */
1161
            template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
1162
               requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
1163
            constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
417,456,662✔
1164
               constexpr size_type block_bits = sizeof(BlockT) * 8;
417,456,662✔
1165
               auto bits = detail::first(ops...).bits_to_read();
417,456,662✔
1166
               if(bits == 0) {
417,456,662✔
1167
                  return true;
1168
               }
1169

1170
               bits += block_bits;  // avoid unsigned integer underflow in the following loop
244,628✔
1171
               while(bits > block_bits * 2 - 8) {
3,661,545✔
1172
                  bits -= block_bits;
3,416,918✔
1173
                  if constexpr(is_predicate) {
1174
                     if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
21✔
1175
                        return false;
1176
                     }
1177
                  } else if constexpr(is_manipulator) {
1178
                     detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
3,410,694✔
1179
                  } else {
1180
                     apply(fn, bits, ops.template load_next<BlockT>()...);
44,900✔
1181
                  }
1182
               }
1183
               return true;
1184
            }
1185

1186
         private:
1187
            template <std::unsigned_integral... BlockTs>
1188
               requires(all_same_v<BlockTs...>)
1189
            constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
2,147,483,647✔
1190
               if constexpr(needs_mask) {
1191
                  return fn(blocks..., make_mask<detail::first_t<BlockTs...>>(bits));
3✔
1192
               } else {
1193
                  return fn(blocks...);
2,147,483,647✔
1194
               }
1195
            }
1196
      };
1197

1198
      /**
1199
       * Helper function of `full_range_operation` and `range_operation` that
1200
       * calls @p fn on a given aligned unsigned integer block as long as the
1201
       * underlying bit range contains enough bits to fill the block fully.
1202
       *
1203
       * This uses bare memory access to gain a speed up for aligned data.
1204
       */
1205
      template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
1206
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
1207
                  sizeof...(BitRangeOperatorTs) > 0)
1208
      static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
1,251,274,225✔
1209
         constexpr size_type block_bytes = sizeof(BlockT);
1,251,274,225✔
1210
         constexpr size_type block_bits = block_bytes * 8;
1,251,274,225✔
1211
         const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;
1,251,274,225✔
1212

1213
         using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
1214
         const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
2,147,483,647✔
1215
         (ops.advance(block_bytes * blocks), ...);
1,251,274,225✔
1216
         return result;
1,251,274,225✔
1217
      }
1218

1219
      /**
1220
       * Helper function of `full_range_operation` and `range_operation` that
1221
       * calls @p fn on a given unsigned integer block size as long as the
1222
       * underlying bit range contains enough bits to fill the block.
1223
       */
1224
      template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
1225
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
1226
      static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
417,456,662✔
1227
         using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
1228
         return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
417,365,347✔
1229
      }

      /**
       * Apply @p fn to all bits in the ranges defined by @p ops. If more than
       * one range operator is passed to @p ops, @p fn receives corresponding
       * blocks of bits from each operator. Therefore, all @p ops have to define
       * the exact same length of their underlying ranges.
       *
       * @p fn may return a bit block that will be stored into the _first_ bit
       * range passed into @p ops. If @p fn returns a boolean, and its value is
       * `false`, the range operation is cancelled and `false` is returned.
       *
       * The implementation pulls bits in the largest bit blocks possible and
       * falls back to smaller bit blocks only when needed.
       */
      template <typename FnT, typename... BitRangeOperatorTs>
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
                  sizeof...(BitRangeOperatorTs) > 0)
      static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
         BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");

         if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {
            // Note: At the moment we can assume that this will always be used
            //       on the _entire_ bitvector. Therefore, we can safely assume
            //       that the bitvectors' underlying buffers are properly aligned.
            //       If this assumption changes, we need to add further handling
            //       to process a byte padding at the beginning of the bitvector
            //       until a memory alignment boundary is reached.
            const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);
            BOTAN_ASSERT_NOMSG(alignment);

            return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
                   _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
                   _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
                   _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
         } else {
            return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
                   _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
                   _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
                   _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
         }
      }
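
      // Callback sketch (illustrative, not part of this header): with the
      // machinery above, an in-place XOR of two equal-length bitvectors could
      // be phrased as a blockwise callback whose returned block is written
      // back into the first range. `lhs_bv` and `rhs_bv` are hypothetical
      // bitvector_base instances:
      //
      //    full_range_operation([](auto lhs, auto rhs) { return lhs ^ rhs; },
      //                         lhs_bv, rhs_bv);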

      /**
       * Apply @p fn to all bit blocks in the bitvector(s).
       */
      template <typename FnT, typename... BitvectorTs>
         requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
                  (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
      static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
         BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
         return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
      }

      template <typename SomeT, typename... SomeTs>
      static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
         return ((v.size() == vs.size()) && ... && true);
      }

      template <std::unsigned_integral T>
      static constexpr T make_mask(size_type bits) {
         // Branchless constant-time construction of a mask with the lowest
         // @p bits bits set; yields an all-ones mask if @p bits >= width of T.
         const bool max = bits >= sizeof(T) * 8;
         bits &= T(max - 1);
         return (T(!max) << bits) - 1;
      }
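
      // A minimal sanity sketch (illustrative only; make_mask is a private
      // member, so these would have to live inside the class to compile):
      //
      //    static_assert(make_mask<uint8_t>(0) == 0x00);
      //    static_assert(make_mask<uint8_t>(3) == 0b00000111);
      //    static_assert(make_mask<uint8_t>(8) == 0xFF);  // all-ones once bits >= 8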

      auto as_byte_span() { return std::span{reinterpret_cast<uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)}; }

      auto as_byte_span() const {
         return std::span{reinterpret_cast<const uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)};
      }

   private:
      size_type m_bits;
      std::vector<block_type, allocator_type> m_blocks;
};

using secure_bitvector = bitvector_base<secure_allocator>;
using bitvector = bitvector_base<std::allocator>;

namespace detail {

/**
 * If one of the allocators is a Botan::secure_allocator, this will always
 * prefer it. Otherwise, the allocator of @p lhs will be used as a default.
 */
template <bitvectorish T1, bitvectorish T2>
constexpr auto copy_lhs_allocator_aware(const T1& lhs, const T2& /*rhs*/) {
   constexpr bool needs_secure_allocator =
      strong_type_wrapped_type<T1>::uses_secure_allocator || strong_type_wrapped_type<T2>::uses_secure_allocator;

   if constexpr(needs_secure_allocator) {
      return lhs.template as<secure_bitvector>();
   } else {
      return lhs.template as<bitvector>();
   }
}

}  // namespace detail

template <bitvectorish T1, bitvectorish T2>
auto operator|(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res |= rhs;
   return res;
}

template <bitvectorish T1, bitvectorish T2>
auto operator&(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res &= rhs;
   return res;
}

template <bitvectorish T1, bitvectorish T2>
auto operator^(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res ^= rhs;
   return res;
}
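
// Usage sketch (illustrative): mixing allocator flavours promotes the result
// to the secure variant, per detail::copy_lhs_allocator_aware above. The
// construction of `a` and `b` assumes the size-taking constructor defined
// earlier in this header.
//
//    Botan::secure_bitvector a(128);
//    Botan::bitvector b(128);
//    auto c = a ^ b;  // c is a Botan::secure_bitvector
//    auto d = b ^ b;  // d is a plain Botan::bitvector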

template <bitvectorish T1, bitvectorish T2>
bool operator==(const T1& lhs, const T2& rhs) {
   return lhs.equals_vartime(rhs);
}

template <bitvectorish T1, bitvectorish T2>
bool operator!=(const T1& lhs, const T2& rhs) {
   return !lhs.equals_vartime(rhs);
}

namespace detail {

/**
 * A Strong<> adapter for arbitrarily large bitvectors
 */
template <concepts::container T>
   requires is_bitvector_v<T>
class Strong_Adapter<T> : public Container_Strong_Adapter_Base<T> {
   public:
      using size_type = typename T::size_type;

   public:
      using Container_Strong_Adapter_Base<T>::Container_Strong_Adapter_Base;

      auto at(size_type i) const { return this->get().at(i); }

      auto at(size_type i) { return this->get().at(i); }

      auto set(size_type i) { return this->get().set(i); }

      auto unset(size_type i) { return this->get().unset(i); }

      auto flip(size_type i) { return this->get().flip(i); }

      auto flip() { return this->get().flip(); }

      template <typename OutT>
      auto as() const {
         return this->get().template as<OutT>();
      }

      template <bitvectorish OutT = T>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         return this->get().template subvector<OutT>(pos, length);
      }

      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      auto subvector(size_type pos) const {
         return this->get().template subvector<OutT>(pos);
      }

      template <typename InT>
         requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
      void subvector_replace(size_type pos, InT value) {
         return this->get().subvector_replace(pos, value);
      }

      template <bitvectorish OtherT>
      auto equals(const OtherT& other) const {
         return this->get().equals(other);
      }

      auto push_back(bool b) { return this->get().push_back(b); }

      auto pop_back() { return this->get().pop_back(); }

      auto front() const { return this->get().front(); }

      auto front() { return this->get().front(); }

      auto back() const { return this->get().back(); }

      auto back() { return this->get().back(); }

      auto any_vartime() const { return this->get().any_vartime(); }

      auto all_vartime() const { return this->get().all_vartime(); }

      auto none_vartime() const { return this->get().none_vartime(); }

      auto has_odd_hamming_weight() const { return this->get().has_odd_hamming_weight(); }

      auto hamming_weight() const { return this->get().hamming_weight(); }

      auto from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         return this->get().from_bytes(bytes, bits);
      }

      template <typename OutT = T>
      auto to_bytes() const {
         return this->get().template to_bytes<OutT>();
      }

      auto to_bytes(std::span<uint8_t> out) const { return this->get().to_bytes(out); }

      auto to_string() const { return this->get().to_string(); }

      auto capacity() const { return this->get().capacity(); }

      auto reserve(size_type n) { return this->get().reserve(n); }

      constexpr void _const_time_poison() const { this->get()._const_time_poison(); }

      constexpr void _const_time_unpoison() const { this->get()._const_time_unpoison(); }
};
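
// Usage sketch (illustrative): wrapping a bitvector in a Strong<> type keeps
// the bit-level API available through this adapter. "CodeWord" is a
// hypothetical strong type name, not part of this header.
//
//    using CodeWord = Botan::Strong<Botan::secure_bitvector, struct CodeWord_>;
//    CodeWord cw(Botan::secure_bitvector(64));
//    cw.set(3);
//    cw.flip();
//    const auto weight = cw.hamming_weight();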

}  // namespace detail

}  // namespace Botan

#endif