• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

randombit / botan / 19596147897

22 Nov 2025 01:25PM UTC coverage: 90.629% (+0.002%) from 90.627%
19596147897

Pull #5168

github

web-flow
Merge 9ea34d5a5 into f8eb34002
Pull Request #5168: Add clang-format 17 formatting rules for attributes and inline assembly

100659 of 111067 relevant lines covered (90.63%)

12696690.87 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.45
/src/lib/utils/alignment_buffer.h
1
/*
2
 * Alignment buffer helper
3
 * (C) 2023 Jack Lloyd
4
 *     2023 René Meusel - Rohde & Schwarz Cybersecurity
5
 *
6
 * Botan is released under the Simplified BSD License (see license.txt)
7
 */
8

9
#ifndef BOTAN_ALIGNMENT_BUFFER_H_
10
#define BOTAN_ALIGNMENT_BUFFER_H_
11

12
#include <botan/concepts.h>
13
#include <botan/mem_ops.h>
14
#include <botan/internal/stl_util.h>
15

16
#include <array>
17
#include <optional>
18
#include <span>
19

20
namespace Botan {
21

22
/**
 * Selects how the final block of a message is treated by
 * AlignmentBuffer<>::handle_unaligned_data().
 *
 * - is_not_special:   the closing block is processed just like any other block
 * - must_be_deferred: the closing block is withheld during bulk processing
 *                     (typically add_data()) and handed out only when consumed
 *                     explicitly (typically in final_result())
 *
 * A block counts as "the final block" as soon as the feeding BufferSlicer<>
 * holds no further input. As a consequence, the must_be_deferred strategy may
 * come with a small performance overhead.
 */
enum class AlignmentBufferFinalBlock : uint8_t {
   is_not_special = 0,
   must_be_deferred = 1,
};
38

39
/**
40
 * @brief Alignment buffer helper
41
 *
42
 * Many algorithms have an intrinsic block size in which they consume input
43
 * data. When streaming arbitrary data chunks to such algorithms we must store
44
 * some data intermittently to honor the algorithm's alignment requirements.
45
 *
46
 * This helper encapsulates such an alignment buffer. The API of this class is
47
 * designed to minimize user errors in the algorithm implementations. Therefore,
48
 * it is strongly opinionated on its use case. Don't try to use it for anything
49
 * but the described circumstance.
50
 *
51
 * @tparam T                     the element type of the internal buffer
52
 * @tparam BLOCK_SIZE            the buffer size to use for the alignment buffer
53
 * @tparam FINAL_BLOCK_STRATEGY  defines whether the final input data block is
54
 *                               retained in handle_unaligned_data() and must be
55
 *                               manually consumed
56
 */
57
template <typename T,
58
          size_t BLOCK_SIZE,
59
          AlignmentBufferFinalBlock FINAL_BLOCK_STRATEGY = AlignmentBufferFinalBlock::is_not_special>
60
   requires(BLOCK_SIZE > 0)
61
class AlignmentBuffer {
62
   public:
63
      AlignmentBuffer() = default;
578,061✔
64

65
      ~AlignmentBuffer() { secure_scrub_memory(m_buffer.data(), m_buffer.size()); }
1,036✔
66

67
      AlignmentBuffer(const AlignmentBuffer& other) = default;
68
      AlignmentBuffer(AlignmentBuffer&& other) noexcept = default;
69
      AlignmentBuffer& operator=(const AlignmentBuffer& other) = default;
70
      AlignmentBuffer& operator=(AlignmentBuffer&& other) noexcept = default;
71

72
      void clear() {
449,208✔
73
         clear_mem(m_buffer.data(), m_buffer.size());
312,134,528✔
74
         m_position = 0;
895,657✔
75
      }
76

77
      /**
78
       * Fills the currently unused bytes of the buffer with zero bytes
79
       */
80
      void fill_up_with_zeros() {
321,862,401✔
81
         if(!ready_to_consume()) {
321,864,501✔
82
            clear_mem(&m_buffer[m_position], elements_until_alignment());
643,724,724✔
83
            m_position = m_buffer.size();
321,862,362✔
84
         }
85
      }
86

87
      /**
88
       * Appends the provided @p elements to the buffer. The user has to make
89
       * sure that @p elements fits in the remaining capacity of the buffer.
90
       */
91
      void append(std::span<const T> elements) {
1,299,005,602✔
92
         BOTAN_ASSERT_NOMSG(elements.size() <= elements_until_alignment());
×
93
         std::copy(elements.begin(), elements.end(), m_buffer.begin() + m_position);
1,299,005,602✔
94
         m_position += elements.size();
1,299,005,602✔
95
      }
1,299,005,602✔
96

97
      /**
98
       * Allows direct modification of the first @p elements in the buffer.
99
       * This is a low-level accessor that neither takes the buffer's current
100
       * capacity into account nor does it change the internal cursor.
101
       * Beware not to overwrite unconsumed bytes.
102
       */
103
      std::span<T> directly_modify_first(size_t elements) {
2,100✔
104
         BOTAN_ASSERT_NOMSG(size() >= elements);
2,100✔
105
         return std::span(m_buffer).first(elements);
2,100✔
106
      }
107

108
      /**
109
       * Allows direct modification of the last @p elements in the buffer.
110
       * This is a low-level accessor that neither takes the buffer's current
111
       * capacity into account nor does it change the internal cursor.
112
       * Beware not to overwrite unconsumed bytes.
113
       */
114
      std::span<T> directly_modify_last(size_t elements) {
310,877,087✔
115
         BOTAN_ASSERT_NOMSG(size() >= elements);
310,877,087✔
116
         return std::span(m_buffer).last(elements);
310,877,087✔
117
      }
118

119
      /**
120
       * Once the buffer reached alignment, this can be used to consume as many
121
       * input bytes from the given @p slider as possible. The output always
122
       * contains data elements that are a multiple of the intrinsic block size.
123
       *
124
       * @returns a view onto the aligned data from @p slicer and the number of
125
       *          full blocks that are represented by this view.
126
       */
127
      [[nodiscard]]
128
      std::tuple<std::span<const uint8_t>, size_t> aligned_data_to_process(BufferSlicer& slicer) const {
297,631,251✔
129
         BOTAN_ASSERT_NOMSG(in_alignment());
×
130

131
         // When the final block is to be deferred, the last block must not be
132
         // selected for processing if there is no (unaligned) extra input data.
133
         const size_t defer = (defers_final_block()) ? 1 : 0;
297,631,251✔
134
         const size_t full_blocks_to_process = (slicer.remaining() - defer) / m_buffer.size();
297,631,251✔
135
         return {slicer.take(full_blocks_to_process * m_buffer.size()), full_blocks_to_process};
297,631,251✔
136
      }
137

138
      /**
139
       * Once the buffer reached alignment, this can be used to consume full
140
       * blocks from the input data represented by @p slicer.
141
       *
142
       * @returns a view onto the next full block from @p slicer or std::nullopt
143
       *          if not enough data is available in @p slicer.
144
       */
145
      [[nodiscard]]
146
      std::optional<std::span<const uint8_t>> next_aligned_block_to_process(BufferSlicer& slicer) const {
6,276✔
147
         BOTAN_ASSERT_NOMSG(in_alignment());
×
148

149
         // When the final block is to be deferred, the last block must not be
150
         // selected for processing if there is no (unaligned) extra input data.
151
         const size_t defer = (defers_final_block()) ? 1 : 0;
6,276✔
152
         if(slicer.remaining() < m_buffer.size() + defer) {
6,276✔
153
            return std::nullopt;
2,750✔
154
         }
155

156
         return slicer.take(m_buffer.size());
3,526✔
157
      }
158

159
      /**
160
       * Intermittently buffers potentially unaligned data provided in @p
161
       * slicer. If the internal buffer already contains some elements, data is
162
       * appended. Once a full block is collected, it is returned to the caller
163
       * for processing.
164
       *
165
       * @param slicer the input data source to be (partially) consumed
166
       * @returns a view onto a full block once enough data was collected, or
167
       *          std::nullopt if no full block is available yet
168
       */
169
      [[nodiscard]]
170
      std::optional<std::span<const T>> handle_unaligned_data(BufferSlicer& slicer) {
1,110,680,836✔
171
         // When the final block is to be deferred, we would need to store and
172
         // hold a buffer that contains exactly one block until more data is
173
         // passed or it is explicitly consumed.
174
         const size_t defer = (defers_final_block()) ? 1 : 0;
1,110,680,836✔
175

176
         if(in_alignment() && slicer.remaining() >= m_buffer.size() + defer) {
604,302,722✔
177
            // We are currently in alignment and the passed-in data source
178
            // contains enough data to benefit from aligned processing.
179
            // Therefore, we don't copy anything into the intermittent buffer.
180
            return std::nullopt;
122,559,445✔
181
         }
182

183
         // Fill the buffer with as much input data as needed to reach alignment
184
         // or until the input source is depleted.
185
         const auto elements_to_consume = std::min(m_buffer.size() - m_position, slicer.remaining());
988,121,391✔
186
         append(slicer.take(elements_to_consume));
988,121,391✔
187

188
         // If we collected enough data, we push out one full block. When
189
         // deferring the final block is enabled, we additionally check that
190
         // more input data is available to continue processing a consecutive
191
         // block.
192
         if(ready_to_consume() && (!defers_final_block() || !slicer.empty())) {
987,697,115✔
193
            return consume();
175,074,552✔
194
         } else {
195
            return std::nullopt;
813,046,839✔
196
         }
197
      }
198

199
      /**
200
       * Explicitly consume the currently collected block. It is the caller's
201
       * responsibility to ensure that the buffer is filled fully. After
202
       * consumption, the buffer is cleared and ready to collect new data.
203
       */
204
      [[nodiscard]]
205
      std::span<const T> consume() {
496,939,054✔
206
         BOTAN_ASSERT_NOMSG(ready_to_consume());
×
207
         m_position = 0;
496,939,054✔
208
         return m_buffer;
496,939,054✔
209
      }
210

211
      /**
212
       * Explicitly consumes however many bytes are currently stored in the
213
       * buffer. After consumption, the buffer is cleared and ready to collect
214
       * new data.
215
       */
216
      [[nodiscard]]
217
      std::span<const T> consume_partial() {
3✔
218
         const auto elements = elements_in_buffer();
3✔
219
         m_position = 0;
3✔
220
         return std::span(m_buffer).first(elements);
3✔
221
      }
222

223
      constexpr size_t size() const { return m_buffer.size(); }
310,881,349✔
224

225
      size_t elements_in_buffer() const { return m_position; }
232,335✔
226

227
      size_t elements_until_alignment() const { return m_buffer.size() - m_position; }
1,631,606,624✔
228

229
      /**
230
       * @returns true if the buffer is empty (i.e. contains no unaligned data)
231
       */
232
      bool in_alignment() const { return m_position == 0; }
2,147,483,647✔
233

234
      /**
235
       * @returns true if the buffer is full (i.e. a block is ready to be consumed)
236
       */
237
      bool ready_to_consume() const { return m_position == m_buffer.size(); }
2,117,802,055✔
238

239
      constexpr bool defers_final_block() const {
240
         return FINAL_BLOCK_STRATEGY == AlignmentBufferFinalBlock::must_be_deferred;
241
      }
242

243
   private:
244
      std::array<T, BLOCK_SIZE> m_buffer = {};
245
      size_t m_position = 0;
246
};
247

248
}  // namespace Botan
249

250
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc