• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

randombit / botan / 21768358452

06 Feb 2026 10:35PM UTC coverage: 90.064% (-0.003%) from 90.067%
21768358452

Pull #5289

github

web-flow
Merge f589db195 into 8ea0ca252
Pull Request #5289: Further misc header reductions, forward declarations, etc

102238 of 113517 relevant lines covered (90.06%)

11357432.36 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.45
/src/lib/utils/alignment_buffer.h
1
/*
2
 * Alignment buffer helper
3
 * (C) 2023 Jack Lloyd
4
 *     2023 René Meusel - Rohde & Schwarz Cybersecurity
5
 *
6
 * Botan is released under the Simplified BSD License (see license.txt)
7
 */
8

9
#ifndef BOTAN_ALIGNMENT_BUFFER_H_
10
#define BOTAN_ALIGNMENT_BUFFER_H_
11

12
#include <botan/internal/buffer_slicer.h>
13
#include <botan/internal/mem_utils.h>
14
#include <array>
15
#include <optional>
16
#include <span>
17

18
namespace Botan {
19

20
/**
21
 * Defines the strategy for handling the final block of input data in the
22
 * handle_unaligned_data() method of the AlignmentBuffer<>.
23
 *
24
 * - is_not_special:   the final block is treated like any other block
25
 * - must_be_deferred: the final block is not emitted while bulk processing (typically add_data())
26
 *                     but is deferred until manually consumed (typically final_result())
27
 *
28
 * The AlignmentBuffer<> assumes data to be "the final block" if no further
29
 * input data is available in the BufferSlicer<>. This might result in some
30
 * performance overhead when using the must_be_deferred strategy.
31
 */
32
// Strategy selector for AlignmentBuffer<>: controls whether the final input
// block is emitted during bulk processing or withheld for explicit consumption.
enum class AlignmentBufferFinalBlock : uint8_t {
   // The final block receives no special treatment; it is emitted like any other.
   is_not_special = 0,
   // The final block is retained during bulk processing (e.g. add_data()) and
   // must be consumed manually later (e.g. in final_result()).
   must_be_deferred = 1,
};
36

37
/**
38
 * @brief Alignment buffer helper
39
 *
40
 * Many algorithms have an intrinsic block size in which they consume input
41
 * data. When streaming arbitrary data chunks to such algorithms we must store
42
 * some data intermittently to honor the algorithm's alignment requirements.
43
 *
44
 * This helper encapsulates such an alignment buffer. The API of this class is
45
 * designed to minimize user errors in the algorithm implementations. Therefore,
46
 * it is strongly opinionated on its use case. Don't try to use it for anything
47
 * but the described circumstance.
48
 *
49
 * @tparam T                     the element type of the internal buffer
50
 * @tparam BLOCK_SIZE            the buffer size to use for the alignment buffer
51
 * @tparam FINAL_BLOCK_STRATEGY  defines whether the final input data block is
52
 *                               retained in handle_unaligned_data() and must be
53
 *                               manually consumed
54
 */
55
template <typename T,
56
          size_t BLOCK_SIZE,
57
          AlignmentBufferFinalBlock FINAL_BLOCK_STRATEGY = AlignmentBufferFinalBlock::is_not_special>
58
   requires(BLOCK_SIZE > 0)
59
class AlignmentBuffer {
60
   public:
61
      AlignmentBuffer() = default;
588,493✔
62

63
      ~AlignmentBuffer() { secure_zeroize_buffer(m_buffer.data(), sizeof(T) * m_buffer.size()); }
1,036✔
64

65
      AlignmentBuffer(const AlignmentBuffer& other) = default;
66
      AlignmentBuffer(AlignmentBuffer&& other) noexcept = default;
67
      AlignmentBuffer& operator=(const AlignmentBuffer& other) = default;
68
      AlignmentBuffer& operator=(AlignmentBuffer&& other) noexcept = default;
69

70
      void clear() {
238,508,462✔
71
         zeroize_buffer(m_buffer.data(), m_buffer.size());
238,508,462✔
72
         m_position = 0;
924,228✔
73
      }
74

75
      /**
76
       * Fills the currently unused bytes of the buffer with zero bytes
77
       */
78
      void fill_up_with_zeros() {
248,218,338✔
79
         if(!ready_to_consume()) {
248,218,338✔
80
            zeroize_buffer(&m_buffer[m_position], elements_until_alignment());
248,215,946✔
81
            m_position = m_buffer.size();
248,215,946✔
82
         }
83
      }
84

85
      /**
86
       * Appends the provided @p elements to the buffer. The user has to make
87
       * sure that @p elements fits in the remaining capacity of the buffer.
88
       */
89
      void append(std::span<const T> elements) {
960,111,677✔
90
         BOTAN_ASSERT_NOMSG(elements.size() <= elements_until_alignment());
×
91
         std::copy(elements.begin(), elements.end(), m_buffer.begin() + m_position);
960,111,677✔
92
         m_position += elements.size();
960,111,677✔
93
      }
960,111,677✔
94

95
      /**
96
       * Allows direct modification of the first @p elements in the buffer.
97
       * This is a low-level accessor that neither takes the buffer's current
98
       * capacity into account nor does it change the internal cursor.
99
       * Beware not to overwrite unconsumed bytes.
100
       */
101
      std::span<T> directly_modify_first(size_t elements) {
2,100✔
102
         BOTAN_ASSERT_NOMSG(size() >= elements);
2,100✔
103
         return std::span(m_buffer).first(elements);
2,100✔
104
      }
105

106
      /**
107
       * Allows direct modification of the last @p elements in the buffer.
108
       * This is a low-level accessor that neither takes the buffer's current
109
       * capacity into account nor does it change the internal cursor.
110
       * Beware not to overwrite unconsumed bytes.
111
       */
112
      std::span<T> directly_modify_last(size_t elements) {
237,230,955✔
113
         BOTAN_ASSERT_NOMSG(size() >= elements);
237,230,955✔
114
         return std::span(m_buffer).last(elements);
237,230,955✔
115
      }
116

117
      /**
118
       * Once the buffer reached alignment, this can be used to consume as many
119
       * input bytes from the given @p slider as possible. The output always
120
       * contains data elements that are a multiple of the intrinsic block size.
121
       *
122
       * @returns a view onto the aligned data from @p slicer and the number of
123
       *          full blocks that are represented by this view.
124
       */
125
      [[nodiscard]] std::tuple<std::span<const uint8_t>, size_t> aligned_data_to_process(BufferSlicer& slicer) const {
222,674,624✔
126
         BOTAN_ASSERT_NOMSG(in_alignment());
×
127

128
         // When the final block is to be deferred, the last block must not be
129
         // selected for processing if there is no (unaligned) extra input data.
130
         const size_t defer = (defers_final_block()) ? 1 : 0;
222,674,624✔
131
         const size_t full_blocks_to_process = (slicer.remaining() - defer) / m_buffer.size();
222,674,624✔
132
         return {slicer.take(full_blocks_to_process * m_buffer.size()), full_blocks_to_process};
222,674,624✔
133
      }
134

135
      /**
136
       * Once the buffer reached alignment, this can be used to consume full
137
       * blocks from the input data represented by @p slicer.
138
       *
139
       * @returns a view onto the next full block from @p slicer or std::nullopt
140
       *          if not enough data is available in @p slicer.
141
       */
142
      [[nodiscard]] std::optional<std::span<const uint8_t>> next_aligned_block_to_process(BufferSlicer& slicer) const {
6,276✔
143
         BOTAN_ASSERT_NOMSG(in_alignment());
×
144

145
         // When the final block is to be deferred, the last block must not be
146
         // selected for processing if there is no (unaligned) extra input data.
147
         const size_t defer = (defers_final_block()) ? 1 : 0;
6,276✔
148
         if(slicer.remaining() < m_buffer.size() + defer) {
6,276✔
149
            return std::nullopt;
2,751✔
150
         }
151

152
         return slicer.take(m_buffer.size());
3,525✔
153
      }
154

155
      /**
156
       * Intermittently buffers potentially unaligned data provided in @p
157
       * slicer. If the internal buffer already contains some elements, data is
158
       * appended. Once a full block is collected, it is returned to the caller
159
       * for processing.
160
       *
161
       * @param slicer the input data source to be (partially) consumed
162
       * @returns a view onto a full block once enough data was collected, or
163
       *          std::nullopt if no full block is available yet
164
       */
165
      [[nodiscard]] std::optional<std::span<const T>> handle_unaligned_data(BufferSlicer& slicer) {
830,952,847✔
166
         // When the final block is to be deferred, we would need to store and
167
         // hold a buffer that contains exactly one block until more data is
168
         // passed or it is explicitly consumed.
169
         const size_t defer = (defers_final_block()) ? 1 : 0;
830,952,847✔
170

171
         if(in_alignment() && slicer.remaining() >= m_buffer.size() + defer) {
457,054,279✔
172
            // We are currently in alignment and the passed-in data source
173
            // contains enough data to benefit from aligned processing.
174
            // Therefore, we don't copy anything into the intermittent buffer.
175
            return std::nullopt;
108,079,650✔
176
         }
177

178
         // Fill the buffer with as much input data as needed to reach alignment
179
         // or until the input source is depleted.
180
         const auto elements_to_consume = std::min(m_buffer.size() - m_position, slicer.remaining());
722,873,197✔
181
         append(slicer.take(elements_to_consume));
722,873,197✔
182

183
         // If we collected enough data, we push out one full block. When
184
         // deferring the final block is enabled, we additionally check that
185
         // more input data is available to continue processing a consecutive
186
         // block.
187
         if(ready_to_consume() && (!defers_final_block() || !slicer.empty())) {
722,452,591✔
188
            return consume();
114,597,721✔
189
         } else {
190
            return std::nullopt;
608,275,476✔
191
         }
192
      }
193

194
      /**
195
       * Explicitly consume the currently collected block. It is the caller's
196
       * responsibility to ensure that the buffer is filled fully. After
197
       * consumption, the buffer is cleared and ready to collect new data.
198
       */
199
      [[nodiscard]] std::span<const T> consume() {
362,816,060✔
200
         BOTAN_ASSERT_NOMSG(ready_to_consume());
×
201
         m_position = 0;
362,816,060✔
202
         return m_buffer;
362,816,060✔
203
      }
204

205
      /**
206
       * Explicitly consumes however many bytes are currently stored in the
207
       * buffer. After consumption, the buffer is cleared and ready to collect
208
       * new data.
209
       */
210
      [[nodiscard]] std::span<const T> consume_partial() {
3✔
211
         const auto elements = elements_in_buffer();
3✔
212
         m_position = 0;
3✔
213
         return std::span(m_buffer).first(elements);
3✔
214
      }
215

216
      constexpr size_t size() const { return m_buffer.size(); }
237,235,217✔
217

218
      size_t elements_in_buffer() const { return m_position; }
230,451✔
219

220
      size_t elements_until_alignment() const { return m_buffer.size() - m_position; }
1,219,063,844✔
221

222
      /**
223
       * @returns true if the buffer is empty (i.e. contains no unaligned data)
224
       */
225
      bool in_alignment() const { return m_position == 0; }
1,884,735,409✔
226

227
      /**
228
       * @returns true if the buffer is full (i.e. a block is ready to be consumed)
229
       */
230
      bool ready_to_consume() const { return m_position == m_buffer.size(); }
1,571,138,572✔
231

232
      constexpr bool defers_final_block() const {
233
         return FINAL_BLOCK_STRATEGY == AlignmentBufferFinalBlock::must_be_deferred;
234
      }
235

236
   private:
237
      std::array<T, BLOCK_SIZE> m_buffer = {};
238
      size_t m_position = 0;
239
};
240

241
}  // namespace Botan
242

243
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc