Source File: /src/lib/utils/bitvector/bitvector.h (randombit/botan)
/*
 * An abstraction for an arbitrarily large bitvector that can
 * optionally use the secure_allocator. All bitwise accesses and all
 * constructors are implemented in constant time. Otherwise, only methods
 * with the "ct_" prefix run in constant time.
 *
 * (C) 2023-2024 Jack Lloyd
 * (C) 2023-2024 René Meusel, Rohde & Schwarz Cybersecurity
 *
 * Botan is released under the Simplified BSD License (see license.txt)
 */

#ifndef BOTAN_BIT_VECTOR_H_
#define BOTAN_BIT_VECTOR_H_

#include <botan/concepts.h>
#include <botan/exceptn.h>
#include <botan/mem_ops.h>
#include <botan/secmem.h>
#include <botan/strong_type.h>
#include <botan/internal/bit_ops.h>
#include <botan/internal/ct_utils.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/stl_util.h>

#include <optional>
#include <span>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

namespace Botan {

template <template <typename> typename AllocatorT>
class bitvector_base;

template <typename T>
struct is_bitvector : std::false_type {};

template <template <typename> typename T>
struct is_bitvector<bitvector_base<T>> : std::true_type {};

template <typename T>
constexpr static bool is_bitvector_v = is_bitvector<T>::value;

template <typename T>
concept bitvectorish = is_bitvector_v<strong_type_wrapped_type<T>>;

namespace detail {

template <typename T0, typename... Ts>
struct first_type {
      using type = T0;
};

// get the first type from a parameter pack
// TODO: C++26 will bring Parameter Pack indexing:
//       using first_t = Ts...[0];
template <typename... Ts>
   requires(sizeof...(Ts) > 0)
using first_t = typename first_type<Ts...>::type;

// get the first object from a parameter pack
// TODO: C++26 will bring Parameter Pack indexing:
//       auto first = s...[0];
template <typename T0, typename... Ts>
constexpr static first_t<T0, Ts...> first(T0&& t, Ts&&...) {
   return std::forward<T0>(t);
}

template <typename OutT, typename>
using as = OutT;

template <typename FnT, std::unsigned_integral BlockT, typename... ParamTs>
using blockwise_processing_callback_return_type = std::invoke_result_t<FnT, as<BlockT, ParamTs>...>;

template <typename FnT, typename BlockT, typename... ParamTs>
concept is_blockwise_processing_callback_return_type =
   std::unsigned_integral<BlockT> &&
   (std::same_as<BlockT, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
    std::same_as<void, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>>);

template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback_without_mask =
   is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...> &&
   is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs...>;

template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback_with_mask =
   is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs..., uint8_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs..., uint16_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., uint32_t /* mask */> &&
   is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs..., uint64_t /* mask */>;

/**
 * Defines the callback constraints for the BitRangeOperator. For further
 * details, see bitvector_base::range_operation().
 */
template <typename FnT, typename... ParamTs>
concept blockwise_processing_callback = blockwise_processing_callback_with_mask<FnT, ParamTs...> ||
                                        blockwise_processing_callback_without_mask<FnT, ParamTs...>;
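// A sketch of callbacks satisfying this concept (illustrative, not part of the
// original header): a manipulator that flips every block and a predicate that
// compares two ranges; the machinery below instantiates such callbacks for
// uint8_t through uint64_t blocks.
//
//    auto flip_blocks = [](std::unsigned_integral auto block) -> decltype(block) {
//       return static_cast<decltype(block)>(~block);  // manipulator: returns the new block
//    };
//    auto blocks_equal = []<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) {
//       return lhs == rhs;  // predicate: returning false aborts the iteration
//    };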

template <typename FnT, typename... ParamTs>
concept manipulating_blockwise_processing_callback =
   (blockwise_processing_callback_without_mask<FnT, ParamTs...> &&
    std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
   (blockwise_processing_callback_with_mask<FnT, ParamTs...> &&
    std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);

template <typename FnT, typename... ParamTs>
concept predicate_blockwise_processing_callback =
   (blockwise_processing_callback_without_mask<FnT, ParamTs...> &&
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
   (blockwise_processing_callback_with_mask<FnT, ParamTs...> &&
    std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);

template <typename T>
class bitvector_iterator {
   private:
      using size_type = typename T::size_type;

   public:
      using difference_type = std::make_signed_t<size_type>;
      using value_type = std::remove_const_t<decltype(std::declval<T>().at(0))>;
      using pointer = value_type*;
      using reference = value_type&;

      // TODO: technically, this could be a random access iterator
      using iterator_category = std::bidirectional_iterator_tag;

   public:
      bitvector_iterator() = default;
      ~bitvector_iterator() = default;

      bitvector_iterator(T* bitvector, size_t offset) : m_bitvector(bitvector) { update(offset); }

      bitvector_iterator(const bitvector_iterator& other) noexcept : m_bitvector(other.m_bitvector) {
         update(other.m_offset);
      }

      bitvector_iterator(bitvector_iterator&& other) noexcept : m_bitvector(other.m_bitvector) {
         update(other.m_offset);
      }

      bitvector_iterator& operator=(const bitvector_iterator& other) noexcept {
         if(this != &other) {
            m_bitvector = other.m_bitvector;
            update(other.m_offset);
         }
         return *this;
      }

      bitvector_iterator& operator=(bitvector_iterator&& other) noexcept {
         m_bitvector = other.m_bitvector;
         update(other.m_offset);
         return *this;
      }

      bitvector_iterator& operator++() noexcept {
         update(signed_offset() + 1);
         return *this;
      }

      bitvector_iterator operator++(int) noexcept {
         auto copy = *this;
         update(signed_offset() + 1);
         return copy;
      }

      bitvector_iterator& operator--() noexcept {
         update(signed_offset() - 1);
         return *this;
      }

      bitvector_iterator operator--(int) noexcept {
         auto copy = *this;
         update(signed_offset() - 1);
         return copy;
      }

      std::partial_ordering operator<=>(const bitvector_iterator& other) const noexcept {
         if(m_bitvector == other.m_bitvector) {
            return m_offset <=> other.m_offset;
         } else {
            return std::partial_ordering::unordered;
         }
      }

      bool operator==(const bitvector_iterator& other) const noexcept {
         return m_bitvector == other.m_bitvector && m_offset == other.m_offset;
      }

      reference operator*() const { return m_bitref.value(); }

      pointer operator->() const { return &(m_bitref.value()); }

   private:
      void update(size_type new_offset) {
         m_offset = new_offset;
         if(m_offset < m_bitvector->size()) {
            m_bitref.emplace((*m_bitvector)[m_offset]);
         } else {
            // end() iterator
            m_bitref.reset();
         }
      }

      difference_type signed_offset() const { return static_cast<difference_type>(m_offset); }

   private:
      T* m_bitvector;
      size_type m_offset;
      mutable std::optional<value_type> m_bitref;
};

}  // namespace detail

/**
 * An arbitrarily large bitvector with typical bit manipulation and convenient
 * bitwise access methods. Don't use `bitvector_base` directly, but the type
 * aliases::
 *
 *    * bitvector         - with a standard allocator
 *    * secure_bitvector  - with a secure allocator that auto-scrubs the memory
 */
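// A minimal usage sketch (illustrative, not part of the original header):
//
//    Botan::bitvector bv(10);           // 10 bits, all zero
//    bv.set(3).flip(0);                 // manipulations can be chained
//    const bool b = bv[3];              // convenient bitwise access (true here)
//    Botan::secure_bitvector sbv(256);  // scrubs its memory on destruction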
template <template <typename> typename AllocatorT>
class bitvector_base final {
   public:
      using block_type = uint8_t;
      using size_type = size_t;
      using allocator_type = AllocatorT<block_type>;
      using value_type = block_type;
      using iterator = detail::bitvector_iterator<bitvector_base<AllocatorT>>;
      using const_iterator = detail::bitvector_iterator<const bitvector_base<AllocatorT>>;

      static constexpr size_type block_size_bytes = sizeof(block_type);
      static constexpr size_type block_size_bits = block_size_bytes * 8;
      static constexpr bool uses_secure_allocator = std::is_same_v<allocator_type, secure_allocator<block_type>>;

   private:
      template <template <typename> typename FriendAllocatorT>
      friend class bitvector_base;

      static constexpr block_type one = block_type(1);

      static constexpr size_type block_offset_shift = size_type(3) + ceil_log2(block_size_bytes);
      static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;

      static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }

      static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }

   private:
      /**
       * Internal helper to wrap a single bit in the bitvector and provide
       * certain convenience access methods.
       */
      template <typename BlockT>
         requires std::same_as<block_type, std::remove_cv_t<BlockT>>
      class bitref_base {
         private:
            friend class bitvector_base<AllocatorT>;

            constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
                  m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}

         public:
            bitref_base() = delete;
            bitref_base(const bitref_base&) noexcept = default;
            bitref_base(bitref_base&&) noexcept = default;
            bitref_base& operator=(const bitref_base&) = delete;
            bitref_base& operator=(bitref_base&&) = delete;

            ~bitref_base() = default;

         public:
            constexpr operator bool() const noexcept { return is_set(); }

            constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }

            template <std::integral T>
            constexpr T as() const noexcept {
               return static_cast<T>(is_set());
            }

            constexpr CT::Choice as_choice() const noexcept {
               return CT::Choice::from_int(static_cast<BlockT>(m_block & m_mask));
            }

         protected:
            BlockT& m_block;  // NOLINT(*-non-private-member-variables-in-classes)
            BlockT m_mask;    // NOLINT(*-non-private-member-variables-in-classes)
      };

   public:
      /**
       * Wraps a constant reference to a single bit in the bitvector. The bit
       * can be accessed but not modified.
       */
      template <typename BlockT>
      class bitref final : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;
      };

      /**
       * Wraps a modifiable reference to a single bit in the bitvector. The bit
       * may be accessed and modified (e.g. flipped or XOR'ed).
       *
       * Constant-time operations are used for the bit manipulations. The
       * location of the bit in the bit vector may be leaked, though.
       */
      template <typename BlockT>
         requires(!std::is_const_v<BlockT>)
      class bitref<BlockT> : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;

            ~bitref() = default;
            bitref(const bitref&) noexcept = default;
            bitref(bitref&&) noexcept = default;

            constexpr bitref& set() noexcept {
               this->m_block |= this->m_mask;
               return *this;
            }

            constexpr bitref& unset() noexcept {
               this->m_block &= ~this->m_mask;
               return *this;
            }

            constexpr bitref& flip() noexcept {
               this->m_block ^= this->m_mask;
               return *this;
            }

            // NOLINTBEGIN

            constexpr bitref& operator=(bool bit) noexcept {
               this->m_block =
                  CT::Mask<BlockT>::expand(bit).select(this->m_mask | this->m_block, this->m_block & ~this->m_mask);
               return *this;
            }

            constexpr bitref& operator=(const bitref& bit) noexcept { return *this = bit.is_set(); }

            constexpr bitref& operator=(bitref&& bit) noexcept { return *this = bit.is_set(); }

            // NOLINTEND

            constexpr bitref& operator&=(bool other) noexcept {
               this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
               return *this;
            }

            constexpr bitref& operator|=(bool other) noexcept {
               this->m_block |= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }

            constexpr bitref& operator^=(bool other) noexcept {
               this->m_block ^= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }
      };

   public:
      bitvector_base() : m_bits(0) {}

      bitvector_base(size_type bits) : m_bits(bits), m_blocks(ceil_toblocks(bits)) {}

      /**
       * Initialize the bitvector from a byte-array. Bits are taken byte-wise
       * from least significant to most significant. Example::
       *
       *    bitvector[0] -> LSB(Byte[0])
       *    bitvector[1] -> LSB+1(Byte[0])
       *    ...
       *    bitvector[8] -> LSB(Byte[1])
       *
       * @param bytes The byte vector to be loaded
       * @param bits  The number of bits to be loaded. This must not be more
       *              than the number of bits stored in @p bytes.
       */
      bitvector_base(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         from_bytes(bytes, bits);
      }
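      // Loading sketch for the layout above (illustrative): the byte 0x05
      // (0b0000'0101) yields bits 1, 0, 1 at positions 0, 1 and 2.
      //
      //    const std::vector<uint8_t> bytes{0x05};
      //    Botan::bitvector bv(bytes, 3);  // load only the 3 least significant bits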

      bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
            m_bits(bits.value_or(blocks.size() * block_size_bits)), m_blocks(blocks.begin(), blocks.end()) {}

      bool empty() const { return m_bits == 0; }

      size_type size() const { return m_bits; }

      /**
       * @returns true iff the number of 1-bits in this is odd, false otherwise (constant time)
       */
      CT::Choice has_odd_hamming_weight() const {
         uint64_t acc = 0;
         full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);

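         // XOR-fold the accumulator onto itself: after the loop, the least
         // significant bit of acc holds the XOR (i.e. the parity) of all bits.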
         for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {
            acc ^= acc >> i;
         }

         return CT::Choice::from_int(acc & one);
      }

      /**
       * Counts the number of 1-bits in the bitvector in constant time.
       * @returns the "population count" (or hamming weight) of the bitvector
       */
      size_type hamming_weight() const {
         size_type acc = 0;
         full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
         return acc;
      }

      /**
       * @returns a copy of this bitvector in a new bitvector of type @p OutT
       */
      template <bitvectorish OutT>
      OutT as() const {
         return subvector<OutT>(0, size());
      }

      /**
       * @returns true if @p other contains the same bit pattern as this
       */
      template <bitvectorish OtherT>
      bool equals_vartime(const OtherT& other) const noexcept {
         return size() == other.size() &&
                full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
                                     *this,
                                     unwrap_strong_type(other));
      }

      /**
       * @returns true if @p other contains the same bit pattern as this (in constant time)
       */
      template <bitvectorish OtherT>
      bool equals(const OtherT& other) const noexcept {
         return (*this ^ other).none();
      }

      /// @name Serialization
      /// @{

      /**
       * Re-initialize the bitvector with the given bytes. See the respective
       * constructor for details. This should be used only when trying to save
       * allocations. Otherwise, use the constructor.
       *
       * @param bytes  the byte range to load bits from
       * @param bits   (optional) if not all @p bytes should be loaded in full
       */
      void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         m_bits = bits.value_or(bytes.size_bytes() * 8);
         BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");
         resize(m_bits);

         // load as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
         }

         // load remaining unaligned data
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
         }
      }

      /**
       * Renders the bitvector into a byte array. By default, this will use
       * `std::vector<uint8_t>` or `Botan::secure_vector<uint8_t>`, depending on
       * the allocator used by the bitvector. The rendering is compatible with
       * the bit layout explained in the respective constructor.
       */
      template <concepts::resizable_byte_buffer OutT =
                   std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>
      OutT to_bytes() const {
         OutT out(ceil_tobytes(m_bits));
         to_bytes(out);
         return out;
      }

      /**
       * Renders the bitvector into a properly sized byte range.
       *
       * @param out  a byte range that has a length of at least `ceil_tobytes(size())`.
       */
      void to_bytes(std::span<uint8_t> out) const {
         const auto bytes_needed = ceil_tobytes(m_bits);
         BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");

         // copy as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
         }

         // copy remaining unaligned data
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
         }
      }

      /**
       * Renders this bitvector into a sequence of "0"s and "1"s.
       * This is meant for debugging purposes and is not efficient.
       */
      std::string to_string() const {
         std::stringstream ss;
         for(size_type i = 0; i < size(); ++i) {
            ss << ref(i);
         }
         return ss.str();
      }
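      // Serialization round-trip sketch (illustrative): to_bytes() renders the
      // same layout that from_bytes() and the byte-array constructor consume.
      //
      //    Botan::bitvector bv(12);
      //    bv.set(0).set(9);
      //    const auto bytes = bv.to_bytes();  // ceil_tobytes(12) == 2 bytes
      //    Botan::bitvector bv2(bytes, 12);   // bv2.equals_vartime(bv) == true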

      /// @}

      /// @name Capacity Accessors and Modifiers
      /// @{

      size_type capacity() const { return m_blocks.capacity() * block_size_bits; }

      void reserve(size_type bits) { m_blocks.reserve(ceil_toblocks(bits)); }

      void resize(size_type bits) {
         const auto new_number_of_blocks = ceil_toblocks(bits);
         if(new_number_of_blocks != m_blocks.size()) {
            m_blocks.resize(new_number_of_blocks);
         }

         m_bits = bits;
         zero_unused_bits();
      }

      void push_back(bool bit) {
         const auto i = size();
         resize(i + 1);
         ref(i) = bit;
      }

      void pop_back() {
         if(!empty()) {
            resize(size() - 1);
         }
      }

      /// @}

      /// @name Bitwise and Global Accessors and Modifiers
      /// @{

      auto at(size_type pos) {
         check_offset(pos);
         return ref(pos);
      }

      // TODO C++23: deducing this
      auto at(size_type pos) const {
         check_offset(pos);
         return ref(pos);
      }

      auto front() { return ref(0); }

      // TODO C++23: deducing this
      auto front() const { return ref(0); }

      auto back() { return ref(size() - 1); }

      // TODO C++23: deducing this
      auto back() const { return ref(size() - 1); }

      /**
       * Sets the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& set(size_type pos) {
         check_offset(pos);
         ref(pos).set();
         return *this;
      }

      /**
       * Sets all currently allocated bits.
       */
      bitvector_base& set() {
         full_range_operation(
            [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(~0); },
            *this);
         zero_unused_bits();
         return *this;
      }

      /**
       * Unsets the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& unset(size_type pos) {
         check_offset(pos);
         ref(pos).unset();
         return *this;
      }

      /**
       * Unsets all currently allocated bits.
       */
      bitvector_base& unset() {
         full_range_operation(
            [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
            *this);
         return *this;
      }

      /**
       * Flips the bit at position @p pos.
       * @throws Botan::Invalid_Argument if @p pos is out of range
       */
      bitvector_base& flip(size_type pos) {
         check_offset(pos);
         ref(pos).flip();
         return *this;
      }

      /**
       * Flips all currently allocated bits.
       */
      bitvector_base& flip() {
         full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
         zero_unused_bits();
         return *this;
      }

      /**
       * @returns true iff no bit is set
       */
      bool none_vartime() const {
         return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
      }

      /**
       * @returns true iff no bit is set (constant time)
       */
      bool none() const { return hamming_weight() == 0; }

      /**
       * @returns true iff at least one bit is set
       */
      bool any_vartime() const { return !none_vartime(); }

      /**
       * @returns true iff at least one bit is set (constant time)
       */
      bool any() const { return !none(); }

      /**
       * @returns true iff all bits are set
       */
      bool all_vartime() const {
         return full_range_operation(
            []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
      }

      /**
       * @returns true iff all bits are set (constant time)
       */
      bool all() const { return hamming_weight() == m_bits; }

      auto operator[](size_type pos) { return ref(pos); }

      // TODO C++23: deducing this
      auto operator[](size_type pos) const { return ref(pos); }

      /// @}

      /// @name Subvectors
      /// @{

      /**
       * Creates a new bitvector with a subsection of this bitvector starting at
       * @p pos copying exactly @p length bits.
       */
      template <bitvectorish OutT = bitvector_base<AllocatorT>>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         size_type bitlen = length.value_or(size() - pos);
         BOTAN_ARG_CHECK(pos + bitlen <= size(), "Not enough bits to copy");

         OutT newvector(bitlen);

         // Handle bitvectors that are wrapped in strong types
         auto& newvector_unwrapped = unwrap_strong_type(newvector);

         if(bitlen > 0) {
            if(pos % 8 == 0) {
               copy_mem(
                  newvector_unwrapped.m_blocks,
                  std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));
            } else {
               BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(
                  *this, pos, bitlen);
               BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(
                  unwrap_strong_type(newvector_unwrapped), 0, bitlen);
               range_operation([](auto /* to */, auto from) { return from; }, to_op, from_op);
            }

            newvector_unwrapped.zero_unused_bits();
         }

         return newvector;
      }

      /**
       * Extracts a subvector of bits as an unsigned integral type @p OutT
       * starting from bit @p pos and copying exactly sizeof(OutT)*8 bits.
       *
       * Hint: The bit at @p pos becomes the least significant bit of the
       *       result and the bit at @p pos + sizeof(OutT)*8 - 1 the most
       *       significant. Hence, the i-th extracted bit is addressed with
       *       bitwise operations like so: bool bit = (out_int >> i) & 1;
       */
      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      OutT subvector(size_type pos) const {
         using result_t = strong_type_wrapped_type<OutT>;
         constexpr size_t bits = sizeof(result_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to copy");
         result_t out = 0;

         if(pos % 8 == 0) {
            out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());
         } else {
            BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
            range_operation(
               [&](std::unsigned_integral auto integer) {
                  if constexpr(std::same_as<result_t, decltype(integer)>) {
                     out = integer;
                  }
               },
               op);
         }

         return wrap_strong_type<OutT>(out);
      }

      /**
       * Replaces a subvector of bits with the bits of an unsigned integral
       * @p value starting at bit @p pos. The number of bits to replace is
       * determined by the width of @p value.
       *
       * @param pos    the position to start replacing bits
       * @param value  the unsigned integer to copy bits from
       */
      template <typename InT>
         requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
      void subvector_replace(size_type pos, InT value) {
         using in_t = strong_type_wrapped_type<InT>;
         constexpr size_t bits = sizeof(in_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to replace");

         if(pos % 8 == 0) {
            store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),
                     unwrap_strong_type(value));
         } else {
            BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
            range_operation(
               [&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
                  if constexpr(std::same_as<in_t, BlockT>) {
                     return unwrap_strong_type(value);
                  } else {
                     // This should never be reached. BOTAN_ASSERT_UNREACHABLE()
                     // caused warning "unreachable code" on MSVC, though. You
                     // don't say!
                     //
                     // Returning the given block back is the most reasonable
                     // thing to do in this case, though.
                     return block;
                  }
               },
               op);
         }
      }
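      // Subvector sketch (illustrative): extract and replace a 16-bit window.
      //
      //    Botan::bitvector bv(64);
      //    bv.subvector_replace(5, uint16_t(0xA5A5));  // write 16 bits at bit 5
      //    const auto w = bv.subvector<uint16_t>(5);   // w == 0xA5A5
      //    const auto head = bv.subvector(0, 8);       // first 8 bits as a bitvector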

      /// @}

      /// @name Operators
      ///
      /// @{

      auto operator~() {
         auto newbv = *this;
         newbv.flip();
         return newbv;
      }

      template <bitvectorish OtherT>
      auto& operator|=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      template <bitvectorish OtherT>
      auto& operator&=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      template <bitvectorish OtherT>
      auto& operator^=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      /// @}

      /// @name Constant Time Operations
      ///
      /// @{

      /**
       * Implements::
       *
       *    if(condition) {
       *       *this ^= other;
       *    }
       *
       * omitting runtime dependence on any of the parameters.
       */
      template <bitvectorish OtherT>
      void ct_conditional_xor(CT::Choice condition, const OtherT& other) {
         BOTAN_ASSERT_NOMSG(m_bits == other.m_bits);
         BOTAN_ASSERT_NOMSG(m_blocks.size() == other.m_blocks.size());

         auto maybe_xor = overloaded{
            [m = CT::Mask<uint64_t>::from_choice(condition)](uint64_t lhs, uint64_t rhs) -> uint64_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint32_t>::from_choice(condition)](uint32_t lhs, uint32_t rhs) -> uint32_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint16_t>::from_choice(condition)](uint16_t lhs, uint16_t rhs) -> uint16_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint8_t>::from_choice(condition)](uint8_t lhs, uint8_t rhs) -> uint8_t {
               return lhs ^ m.if_set_return(rhs);
            },
         };

         full_range_operation(maybe_xor, *this, unwrap_strong_type(other));
      }
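      // Conditional XOR sketch (illustrative): the masks are derived from
      // `condition` up front, so the operation runs the same regardless of the
      // (potentially secret) condition. `secret_bit` is a hypothetical input.
      //
      //    Botan::secure_bitvector a(128), b(128);
      //    const auto cond = Botan::CT::Choice::from_int(secret_bit);
      //    a.ct_conditional_xor(cond, b);  // a ^= b iff cond holds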

      constexpr void _const_time_poison() const { CT::poison(m_blocks); }

      constexpr void _const_time_unpoison() const { CT::unpoison(m_blocks); }

      /// @}

      /// @name Iterators
      ///
      /// @{

      iterator begin() noexcept { return iterator(this, 0); }

      const_iterator begin() const noexcept { return const_iterator(this, 0); }

      const_iterator cbegin() const noexcept { return const_iterator(this, 0); }

      iterator end() noexcept { return iterator(this, size()); }

      const_iterator end() const noexcept { return const_iterator(this, size()); }

      const_iterator cend() noexcept { return const_iterator(this, size()); }
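      // Iteration sketch (illustrative): the iterators yield bit references,
      // so range-for traversal works directly.
      //
      //    Botan::bitvector bv(8);
      //    bv.set(2);
      //    for(auto bit : bv) {
      //       // bit is convertible to bool; true once, for position 2
      //    }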

      /// @}

   private:
      void check_offset(size_type pos) const {
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&m_bits, sizeof(m_bits)));
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&pos, sizeof(pos)));
         BOTAN_ARG_CHECK(pos < m_bits, "Out of range");
      }

      void zero_unused_bits() {
         const auto first_unused_bit = size();

         // Zero out any unused bits in the last block
         if(first_unused_bit % block_size_bits != 0) {
            const block_type mask = (one << block_offset(first_unused_bit)) - one;
            m_blocks[block_index(first_unused_bit)] &= mask;
         }
      }

      static constexpr size_type ceil_toblocks(size_type bits) {
         return (bits + block_size_bits - 1) / block_size_bits;
      }

      auto ref(size_type pos) const { return bitref<const block_type>(m_blocks, pos); }

      auto ref(size_type pos) { return bitref<block_type>(m_blocks, pos); }

   private:
      enum class BitRangeAlignment { byte_aligned, no_alignment };

      /**
       * Helper construction to implement bit range operations on the bitvector.
       * It implements an iterator to read and write blocks of bits from the
       * underlying bitvector, where "blocks of bits" are unsigned integers of
       * varying bit lengths.
       *
       * If the iteration starts at a byte boundary in the underlying bitvector,
       * this applies certain optimizations (i.e. loading blocks of bits straight
       * from the underlying byte buffer). The optimizations are enabled at
       * compile time (with the template parameter `alignment`).
       */
      template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
         requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
      class BitRangeOperator {
         private:
            constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }

            struct UnalignedDataHelper {
                  const uint8_t padding_bits;
                  const uint8_t bits_to_byte_alignment;
            };

         public:
            BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
                  m_source(source),
                  m_start_bitoffset(start_bitoffset),
                  m_bitlength(bitlength),
                  m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
                                      .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
                  m_read_bitpos(start_bitoffset),
                  m_write_bitpos(start_bitoffset) {
               BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
               BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bytes in underlying source");
            }

            BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}

            static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }

            /**
             * @returns the overall number of bits to be iterated with this operator
             */
            size_type size() const { return m_bitlength; }

            /**
             * @returns the number of bits not yet read from this operator
             */
            size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }

            /**
             * @returns the number of bits still to be written into this operator
             */
            size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }

            /**
             * Loads the next block of bits from the underlying bitvector. No
             * bounds checks are performed. The caller can define the size of
             * the resulting unsigned integer block.
             */
            template <std::unsigned_integral BlockT>
            BlockT load_next() const {
               constexpr size_type block_size = sizeof(BlockT);
               constexpr size_type block_bits = block_size * 8;
               const auto bits_remaining = bits_to_read();

               BlockT result_block = 0;
               if constexpr(is_byte_aligned()) {
                  result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
               } else {
                  const size_type byte_pos = read_bytepos();
                  const size_type bits_to_collect = std::min(block_bits, bits_to_read());

                  const uint8_t first_byte = m_source.as_byte_span()[byte_pos];

                  // Initialize the left-most bits from the first byte.
                  result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;

                  // If more bits are needed, we pull them from the remaining bytes.
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
                     const BlockT block =
                        load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
                     result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
                  }
               }

               m_read_bitpos += std::min(block_bits, bits_remaining);
               return result_block;
            }

            /**
             * Stores the next block of bits into the underlying bitvector.
             * No bounds checks are performed. Bit blocks that are not aligned
             * at a byte-boundary in the underlying bitvector are merged with
             * the bits already present in the affected bytes.
             */
            template <std::unsigned_integral BlockT>
               requires(!is_const())
            void store_next(BlockT block) {
               constexpr size_type block_size = sizeof(BlockT);
               constexpr size_type block_bits = block_size * 8;

               if constexpr(is_byte_aligned()) {
                  auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
                  store_le(sink, block);
               } else {
                  const size_type byte_pos = write_bytepos();
                  const size_type bits_to_store = std::min(block_bits, bits_to_write());

                  uint8_t& first_byte = m_source.as_byte_span()[byte_pos];

                  // Set the left-most bits in the first byte, leaving all others unchanged
                  first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
                               uint8_t(block << m_unaligned_helper.padding_bits);

                  // If more bits are provided, we store them in the remaining bytes.
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
                     const auto remaining_bytes =
                        m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
                     const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
                     const BlockT new_bytes =
                        (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
                     store_le(remaining_bytes, new_bytes);
                  }
               }

               m_write_bitpos += std::min(block_bits, bits_to_write());
            }

            template <std::unsigned_integral BlockT>
               requires(is_byte_aligned() && !is_const())
            std::span<BlockT> span(size_type blocks) const {
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
               // Casting via void* avoids a compiler warning
               void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
               return {reinterpret_cast<BlockT*>(ptr), blocks};
            }

            template <std::unsigned_integral BlockT>
               requires(is_byte_aligned() && is_const())
            std::span<const BlockT> span(size_type blocks) const {
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
               // Casting via void* avoids a compiler warning
               const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
               return {reinterpret_cast<const BlockT*>(ptr), blocks};
            }

            void advance(size_type bytes)
               requires(is_byte_aligned())
            {
               m_read_bitpos += bytes * 8;
               m_write_bitpos += bytes * 8;
            }

            template <std::unsigned_integral BlockT>
               requires(is_byte_aligned())
            size_t is_memory_aligned_to() const {
               const void* cptr = m_source.as_byte_span().data() + read_bytepos();
               const void* ptr_before = cptr;

               // std::align takes `ptr` as a reference (!), i.e. `void*&`, and
               // uses it as an out-param. However, `cptr` is const because this
               // method is const-qualified, hence the const_cast<>.
               void* ptr = const_cast<void*>(cptr);
               size_t size = sizeof(BlockT);
               return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
            }

         private:
            size_type read_bytepos() const { return m_read_bitpos / 8; }

            size_type write_bytepos() const { return m_write_bitpos / 8; }

         private:
            BitvectorT& m_source;
            size_type m_start_bitoffset;
            size_type m_bitlength;

            UnalignedDataHelper m_unaligned_helper;

            mutable size_type m_read_bitpos;
            mutable size_type m_write_bitpos;
      };

1111
      /**
1112
       * Helper struct for the low-level handling of blockwise operations
1113
       *
1114
       * This has two main code paths: Optimized for byte-aligned ranges that
1115
       * can simply be taken from memory as-is. And a generic implementation
1116
       * that must assemble blocks from unaligned bits before processing.
1117
       */
1118
      template <typename FnT, typename... ParamTs>
1119
         requires detail::blockwise_processing_callback<FnT, ParamTs...>
1120
      class blockwise_processing_callback_trait {
1121
         public:
1122
            constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
1123
            constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
1124
            constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
1125
            static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");
1126

1127
            /**
1128
             * Applies @p fn to the blocks provided in @p blocks by simply reading from
1129
             * memory without re-arranging any bits across byte-boundaries.
1130
             */
1131
            template <std::unsigned_integral... BlockTs>
1132
               requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
1133
            constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {
1,262,100,943✔
1134
               constexpr size_type bits = sizeof(detail::first_t<BlockTs...>) * 8;
1,262,100,943✔
1135
               const size_type iterations = detail::first(blocks...).size();
1,261,889,052✔
1136
               for(size_type i = 0; i < iterations; ++i) {
2,147,483,647✔
1137
                  if constexpr(is_predicate) {
1138
                     if(!apply(fn, bits, blocks[i]...)) {
33✔
1139
                        return false;
1140
                     }
1141
                  } else if constexpr(is_manipulator) {
1142
                     detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
2,147,483,647✔
1143
                  } else {
1144
                     apply(fn, bits, blocks[i]...);
5,792,403✔
1145
                  }
1146
               }
1147
               return true;
375✔
1148
            }
1149

1150
            /**
1151
             * Applies @p fn to as many blocks as @p ops provide for the given type.
1152
             */
1153
            template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
1154
               requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
1155
            constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
421,065,568✔
1156
               constexpr size_type block_bits = sizeof(BlockT) * 8;
421,065,568✔
1157
               auto bits = detail::first(ops...).bits_to_read();
421,065,568✔
1158
               if(bits == 0) {
421,065,568✔
1159
                  return true;
1160
               }
1161

1162
               bits += block_bits;  // avoid unsigned integer underflow in the following loop
244,628✔
1163
               while(bits > block_bits * 2 - 8) {
3,661,545✔
1164
                  bits -= block_bits;
3,416,918✔
1165
                  if constexpr(is_predicate) {
1166
                     if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
21✔
1167
                        return false;
1168
                     }
1169
                  } else if constexpr(is_manipulator) {
1170
                     detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
3,410,694✔
1171
                  } else {
1172
                     apply(fn, bits, ops.template load_next<BlockT>()...);
44,900✔
1173
                  }
1174
               }
1175
               return true;
1176
            }
1177

1178
         private:
1179
            template <std::unsigned_integral... BlockTs>
1180
               requires(all_same_v<BlockTs...>)
1181
            constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
2,147,483,647✔
1182
               if constexpr(needs_mask) {
1183
                  return fn(blocks..., make_mask<detail::first_t<BlockTs...>>(bits));
3✔
1184
               } else {
1185
                  return fn(blocks...);
2,147,483,647✔
1186
               }
1187
            }
1188
      };
1189

1190
      /**
1191
       * Helper function of `full_range_operation` and `range_operation` that
1192
       * calls @p fn on a given aligned unsigned integer block as long as the
1193
       * underlying bit range contains enough bits to fill the block fully.
1194
       *
1195
       * This uses bare memory access to gain a speed up for aligned data.
1196
       */
1197
      template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
1198
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
1199
                  sizeof...(BitRangeOperatorTs) > 0)
1200
      static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
1,262,100,943✔
1201
         constexpr size_type block_bytes = sizeof(BlockT);
1,262,100,943✔
1202
         constexpr size_type block_bits = block_bytes * 8;
1,262,100,943✔
1203
         const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;
1,262,100,943✔
1204

1205
         using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
1206
         const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
2,147,483,647✔
1207
         (ops.advance(block_bytes * blocks), ...);
1,262,100,943✔
1208
         return result;
1,262,100,943✔
1209
      }
1210

1211
      /**
1212
       * Helper function of `full_range_operation` and `range_operation` that
1213
       * calls @p fn on a given unsigned integer block size as long as the
1214
       * underlying bit range contains enough bits to fill the block.
1215
       */
1216
      template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
1217
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
1218
      static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
421,065,568✔
1219
         using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
1220
         return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
420,974,253✔
1221
      }
1222

1223
      /**
1224
       * Apply @p fn to all bits in the ranges defined by @p ops. If more than
1225
       * one range operator is passed to @p ops, @p fn receives corresponding
1226
       * blocks of bits from each operator. Therefore, all @p ops have to define
1227
       * the exact same length of their underlying ranges.
1228
       *
1229
       * @p fn may return a bit block that will be stored into the _first_ bit
1230
       * range passed into @p ops. If @p fn returns a boolean, and its value is
1231
       * `false`, the range operation is cancelled and `false` is returned.
1232
       *
1233
       * The implementation ensures to pull bits in the largest bit blocks
1234
       * possible and reverts to smaller bit blocks only when needed.
1235
       */
1236
      template <typename FnT, typename... BitRangeOperatorTs>
1237
         requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
1238
                  sizeof...(BitRangeOperatorTs) > 0)
1239
      static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
420,791,637✔
1240
         BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");
162,070✔
1241

1242
         if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {
1243
            // Note: At the moment we can assume that this will always be used
1244
            //       on the _entire_ bitvector. Therefore, we can safely assume
1245
            //       that the bitvectors' underlying buffers are properly aligned.
1246
            //       If this assumption changes, we need to add further handling
1247
            //       to process a byte padding at the beginning of the bitvector
1248
            //       until a memory alignment boundary is reached.
1249
            const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);
841,329,889✔
1250
            BOTAN_ASSERT_NOMSG(alignment);
×
1251

1252
            return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
841,400,634✔
1253
                   _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
841,400,621✔
1254
                   _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
841,400,631✔
1255
                   _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
420,700,308✔
1256
         } else {
1257
            return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
182,630✔
1258
                   _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
91,315✔
1259
                   _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
182,630✔
1260
                   _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
91,315✔
1261
         }
1262
      }

      /**
       * Apply @p fn to all bit blocks in the bitvector(s).
       */
      template <typename FnT, typename... BitvectorTs>
         requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
                  (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
      static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
         BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
         return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
      }
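
      // Usage sketch (illustrative only; this helper is private, so the sketch
      // shows how member functions such as the bitwise-assignment operators
      // can funnel through it, and it assumes a size-based constructor). A
      // callback returning a block writes its result into the first range; a
      // callback returning bool aborts the traversal on `false`:
      //
      //    bitvector_base a(128);
      //    bitvector_base b(128);
      //
      //    // XOR b into a, 64 bits at a time where possible
      //    full_range_operation(
      //       [](std::unsigned_integral auto x, std::unsigned_integral auto y) {
      //          return static_cast<decltype(x)>(x ^ y);
      //       },
      //       a, b);
      //
      //    // compare a and b, bailing out at the first mismatching block
      //    const bool equal = full_range_operation(
      //       [](std::unsigned_integral auto x, std::unsigned_integral auto y) { return x == y; }, a, b);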

      template <typename SomeT, typename... SomeTs>
      static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
         return ((v.size() == vs.size()) && ... && true);
      }

      template <std::unsigned_integral T>
      static constexpr T make_mask(size_type bits) {
         const bool max = bits >= sizeof(T) * 8;
         bits &= T(max - 1);
         return (T(!max) << bits) - 1;
      }
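
      // Worked example (illustrative): make_mask<uint8_t>(3) computes
      // (1 << 3) - 1 == 0b0000'0111. For bits >= 8, e.g. make_mask<uint8_t>(8),
      // `max` is true, so `bits` is clamped to 0 and (T(0) << 0) - 1 wraps
      // around to 0xFF. Both paths execute the same instructions, which keeps
      // this helper free of secret-dependent branches.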

      auto as_byte_span() {
         return std::span{reinterpret_cast<uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)};
      }

      auto as_byte_span() const {
         return std::span{reinterpret_cast<const uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)};
      }

   private:
      size_type m_bits;
      std::vector<block_type, allocator_type> m_blocks;
};

using secure_bitvector = bitvector_base<secure_allocator>;
using bitvector = bitvector_base<std::allocator>;

namespace detail {

/**
 * Returns a copy of @p lhs. If either of the two operands uses a
 * Botan::secure_allocator, the copy will use it as well; otherwise the
 * allocator of @p lhs is used.
 */
template <bitvectorish T1, bitvectorish T2>
constexpr auto copy_lhs_allocator_aware(const T1& lhs, const T2&) {
   constexpr bool needs_secure_allocator =
      strong_type_wrapped_type<T1>::uses_secure_allocator || strong_type_wrapped_type<T2>::uses_secure_allocator;

   if constexpr(needs_secure_allocator) {
      return lhs.template as<secure_bitvector>();
   } else {
      return lhs.template as<bitvector>();
   }
}

}  // namespace detail

template <bitvectorish T1, bitvectorish T2>
auto operator|(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res |= rhs;
   return res;
}

template <bitvectorish T1, bitvectorish T2>
auto operator&(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res &= rhs;
   return res;
}

template <bitvectorish T1, bitvectorish T2>
auto operator^(const T1& lhs, const T2& rhs) {
   auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
   res ^= rhs;
   return res;
}
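
// Usage sketch (illustrative only): operands with different allocators may be
// mixed; the result is backed by the secure_allocator whenever either operand
// is.
//
//    secure_bitvector key_bits(128);
//    bitvector mask(128);
//    auto masked = key_bits ^ mask;  // deduced as secure_bitvector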

template <bitvectorish T1, bitvectorish T2>
bool operator==(const T1& lhs, const T2& rhs) {
   return lhs.equals_vartime(rhs);
}

template <bitvectorish T1, bitvectorish T2>
bool operator!=(const T1& lhs, const T2& rhs) {
   return !lhs.equals_vartime(rhs);
}

namespace detail {

/**
 * A Strong<> adapter for arbitrarily large bitvectors
 */
template <concepts::container T>
   requires is_bitvector_v<T>
class Strong_Adapter<T> : public Container_Strong_Adapter_Base<T> {
   public:
      using size_type = typename T::size_type;

   public:
      using Container_Strong_Adapter_Base<T>::Container_Strong_Adapter_Base;

      auto at(size_type i) const { return this->get().at(i); }

      auto at(size_type i) { return this->get().at(i); }

      auto set(size_type i) { return this->get().set(i); }

      auto unset(size_type i) { return this->get().unset(i); }

      auto flip(size_type i) { return this->get().flip(i); }

      auto flip() { return this->get().flip(); }

      template <typename OutT>
      auto as() const {
         return this->get().template as<OutT>();
      }

      template <bitvectorish OutT = T>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         return this->get().template subvector<OutT>(pos, length);
      }

      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      auto subvector(size_type pos) const {
         return this->get().template subvector<OutT>(pos);
      }

      template <typename InT>
         requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
      void subvector_replace(size_type pos, InT value) {
         return this->get().subvector_replace(pos, value);
      }

      template <bitvectorish OtherT>
      auto equals(const OtherT& other) const {
         return this->get().equals(other);
      }

      auto push_back(bool b) { return this->get().push_back(b); }

      auto pop_back() { return this->get().pop_back(); }

      auto front() const { return this->get().front(); }

      auto front() { return this->get().front(); }

      auto back() const { return this->get().back(); }

      auto back() { return this->get().back(); }

      auto any_vartime() const { return this->get().any_vartime(); }

      auto all_vartime() const { return this->get().all_vartime(); }

      auto none_vartime() const { return this->get().none_vartime(); }

      auto has_odd_hamming_weight() const { return this->get().has_odd_hamming_weight(); }

      auto hamming_weight() const { return this->get().hamming_weight(); }

      auto from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         return this->get().from_bytes(bytes, bits);
      }

      template <typename OutT = T>
      auto to_bytes() const {
         return this->get().template to_bytes<OutT>();
      }

      auto to_bytes(std::span<uint8_t> out) const { return this->get().to_bytes(out); }

      auto to_string() const { return this->get().to_string(); }

      auto capacity() const { return this->get().capacity(); }

      auto reserve(size_type n) { return this->get().reserve(n); }

      constexpr void _const_time_poison() const { this->get()._const_time_poison(); }

      constexpr void _const_time_unpoison() const { this->get()._const_time_unpoison(); }
};
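
// Usage sketch (illustrative; `CodeWord` is a hypothetical strong type): the
// adapter forwards the bitvector interface, so a strong type wrapping a
// bitvector remains usable like one.
//
//    using CodeWord = Strong<secure_bitvector, struct CodeWord_>;
//
//    CodeWord cw(secure_bitvector(64));
//    cw.set(3);
//    const auto weight = cw.hamming_weight();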

}  // namespace detail

}  // namespace Botan

#endif