
randombit / botan / 6441040938

07 Oct 2023 12:08PM UTC · coverage: 91.692% (-0.003%) from 91.695%
push · github · web-flow
Merge pull request #3736 from randombit/fix/ub_in_alignment_buffer

79964 of 87209 relevant lines covered (91.69%)
8498879.53 hits per line

Source File: /src/lib/utils/alignment_buffer.h (92.31% of lines covered)
/*
 * Alignment buffer helper
 * (C) 2023 Jack Lloyd
 *     2023 René Meusel - Rohde & Schwarz Cybersecurity
 *
 * Botan is released under the Simplified BSD License (see license.txt)
 */

#ifndef BOTAN_ALIGNMENT_BUFFER_H_
#define BOTAN_ALIGNMENT_BUFFER_H_

#include <botan/concepts.h>
#include <botan/internal/stl_util.h>

#include <array>
#include <optional>
#include <span>

namespace Botan {

/**
 * Defines the strategy for handling the final block of input data in the
 * handle_unaligned_data() method of the AlignmentBuffer<>.
 *
 * - is_not_special:   the final block is treated like any other block
 * - must_be_deferred: the final block is not emitted during bulk processing (typically add_data())
 *                     but is deferred until manually consumed (typically final_result())
 *
 * The AlignmentBuffer<> assumes data to be "the final block" if no further
 * input data is available in the BufferSlicer<>. This might result in some
 * performance overhead when using the must_be_deferred strategy.
 */
enum class AlignmentBufferFinalBlock : size_t {
   is_not_special = 0,
   must_be_deferred = 1,
};
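
// Illustrative use (not part of the original header): an algorithm that must
// treat its final block specially, e.g. a hash that sets a finalization flag
// on the last compression call, would pick the deferred strategy when
// declaring its buffer member:
//
//    AlignmentBuffer<uint8_t, 128, AlignmentBufferFinalBlock::must_be_deferred> m_buffer;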

/**
 * @brief Alignment buffer helper
 *
 * Many algorithms have an intrinsic block size in which they consume input
 * data. When streaming arbitrary data chunks to such algorithms, we must
 * buffer some data temporarily to honor the algorithm's alignment
 * requirements.
 *
 * This helper encapsulates such an alignment buffer. The API of this class is
 * designed to minimize user errors in the algorithm implementations. Therefore,
 * it is strongly opinionated on its use case. Don't try to use it for anything
 * but the described circumstance.
 *
 * @tparam T                     the element type of the internal buffer
 * @tparam BLOCK_SIZE            the buffer size to use for the alignment buffer
 * @tparam FINAL_BLOCK_STRATEGY  defines whether the final input data block is
 *                               retained in handle_unaligned_data() and must be
 *                               manually consumed
 */
template <typename T,
          size_t BLOCK_SIZE,
          AlignmentBufferFinalBlock FINAL_BLOCK_STRATEGY = AlignmentBufferFinalBlock::is_not_special>
   requires(BLOCK_SIZE > 0)
class AlignmentBuffer {
   public:
      AlignmentBuffer() : m_position(0) {}

      ~AlignmentBuffer() { secure_scrub_memory(m_buffer.data(), m_buffer.size()); }

      AlignmentBuffer(const AlignmentBuffer& other) = default;
      AlignmentBuffer(AlignmentBuffer&& other) noexcept = default;
      AlignmentBuffer& operator=(const AlignmentBuffer& other) = default;
      AlignmentBuffer& operator=(AlignmentBuffer&& other) noexcept = default;

      void clear() {
         clear_mem(m_buffer.data(), m_buffer.size());
         m_position = 0;
      }

      /**
       * Fills the currently unused bytes of the buffer with zero bytes
       */
      void fill_up_with_zeros() {
         if(!ready_to_consume()) {
            clear_mem(&m_buffer[m_position], elements_until_alignment());
            m_position = m_buffer.size();
         }
      }

      /**
       * Appends the provided @p elements to the buffer. The user has to make
       * sure that @p elements fits in the remaining capacity of the buffer.
       */
      void append(std::span<const T> elements) {
         BOTAN_ASSERT_NOMSG(elements.size() <= elements_until_alignment());
         std::copy(elements.begin(), elements.end(), m_buffer.begin() + m_position);
         m_position += elements.size();
      }

      /**
       * Allows direct modification of the first @p elements in the buffer.
       * This is a low-level accessor that neither takes the buffer's current
       * capacity into account nor does it change the internal cursor.
       * Beware not to overwrite unconsumed bytes.
       */
      std::span<T> directly_modify_first(size_t elements) {
         BOTAN_ASSERT_NOMSG(size() >= elements);
         return std::span(m_buffer).first(elements);
      }

      /**
       * Allows direct modification of the last @p elements in the buffer.
       * This is a low-level accessor that neither takes the buffer's current
       * capacity into account nor does it change the internal cursor.
       * Beware not to overwrite unconsumed bytes.
       */
      std::span<T> directly_modify_last(size_t elements) {
         BOTAN_ASSERT_NOMSG(size() >= elements);
         return std::span(m_buffer).last(elements);
      }

      /**
       * Once the buffer has reached alignment, this can be used to consume as
       * many input bytes from the given @p slicer as possible. The output
       * always contains a number of data elements that is a multiple of the
       * intrinsic block size.
       *
       * @returns a view onto the aligned data from @p slicer and the number of
       *          full blocks that are represented by this view.
       */
      [[nodiscard]] std::tuple<std::span<const uint8_t>, size_t> aligned_data_to_process(BufferSlicer& slicer) const {
         BOTAN_ASSERT_NOMSG(in_alignment());

         // When the final block is to be deferred, the last block must not be
         // selected for processing if there is no (unaligned) extra input data.
         const size_t defer = (defers_final_block()) ? 1 : 0;
         const size_t full_blocks_to_process = (slicer.remaining() - defer) / m_buffer.size();
         return {slicer.take(full_blocks_to_process * m_buffer.size()), full_blocks_to_process};
      }

      /**
       * Once the buffer has reached alignment, this can be used to consume
       * full blocks from the input data represented by @p slicer.
       *
       * @returns a view onto the next full block from @p slicer or std::nullopt
       *          if not enough data is available in @p slicer.
       */
      [[nodiscard]] std::optional<std::span<const uint8_t>> next_aligned_block_to_process(BufferSlicer& slicer) const {
         BOTAN_ASSERT_NOMSG(in_alignment());

         // When the final block is to be deferred, the last block must not be
         // selected for processing if there is no (unaligned) extra input data.
         const size_t defer = (defers_final_block()) ? 1 : 0;
         if(slicer.remaining() < m_buffer.size() + defer) {
            return std::nullopt;
         }

         return slicer.take(m_buffer.size());
      }
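
      // Illustrative call pattern (sketch only; compress() stands in for a
      // hypothetical block-processing routine of the calling algorithm):
      //
      //    while(const auto block = m_buffer.next_aligned_block_to_process(slicer)) {
      //       compress(*block);
      //    }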

      /**
       * Temporarily buffers potentially unaligned data provided in @p
       * slicer. If the internal buffer already contains some elements, data is
       * appended. Once a full block is collected, it is returned to the caller
       * for processing.
       *
       * @param slicer the input data source to be (partially) consumed
       * @returns a view onto a full block once enough data was collected, or
       *          std::nullopt if no full block is available yet
       */
      [[nodiscard]] std::optional<std::span<const T>> handle_unaligned_data(BufferSlicer& slicer) {
         // When the final block is to be deferred, we would need to store and
         // hold a buffer that contains exactly one block until more data is
         // passed or it is explicitly consumed.
         const size_t defer = (defers_final_block()) ? 1 : 0;

         if(in_alignment() && slicer.remaining() >= m_buffer.size() + defer) {
            // We are currently in alignment and the passed-in data source
            // contains enough data to benefit from aligned processing.
            // Therefore, we don't copy anything into the intermediate buffer.
            return std::nullopt;
         }

         // Fill the buffer with as much input data as needed to reach alignment
         // or until the input source is depleted.
         const auto elements_to_consume = std::min(m_buffer.size() - m_position, slicer.remaining());
         append(slicer.take(elements_to_consume));

         // If we collected enough data, we push out one full block. When
         // deferring the final block is enabled, we additionally check that
         // more input data is available to continue processing a consecutive
         // block.
         if(ready_to_consume() && (!defers_final_block() || !slicer.empty())) {
            return consume();
         } else {
            return std::nullopt;
         }
      }
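
      // Illustrative streaming loop combining handle_unaligned_data() with
      // aligned_data_to_process() (sketch only; compress() is a hypothetical
      // routine of the calling algorithm):
      //
      //    BufferSlicer slicer(input);
      //    while(!slicer.empty()) {
      //       if(const auto one_block = m_buffer.handle_unaligned_data(slicer)) {
      //          compress(*one_block, 1);
      //       }
      //       if(m_buffer.in_alignment()) {
      //          const auto [bulk, full_blocks] = m_buffer.aligned_data_to_process(slicer);
      //          if(full_blocks > 0) {
      //             compress(bulk, full_blocks);
      //          }
      //       }
      //    }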

      /**
       * Explicitly consume the currently collected block. It is the caller's
       * responsibility to ensure that the buffer is filled fully. After
       * consumption, the buffer is cleared and ready to collect new data.
       */
      [[nodiscard]] std::span<const T> consume() {
         BOTAN_ASSERT_NOMSG(ready_to_consume());
         m_position = 0;
         return m_buffer;
      }

      /**
       * Explicitly consumes however many bytes are currently stored in the
       * buffer. After consumption, the buffer is cleared and ready to collect
       * new data.
       */
      [[nodiscard]] std::span<const T> consume_partial() {
         const auto elements = elements_in_buffer();
         m_position = 0;
         return std::span(m_buffer).first(elements);
      }

      constexpr size_t size() const { return m_buffer.size(); }

      size_t elements_in_buffer() const { return m_position; }

      size_t elements_until_alignment() const { return m_buffer.size() - m_position; }

      /**
       * @returns true if the buffer is empty (i.e. contains no unaligned data)
       */
      bool in_alignment() const { return m_position == 0; }

      /**
       * @returns true if the buffer is full (i.e. a block is ready to be consumed)
       */
      bool ready_to_consume() const { return m_position == m_buffer.size(); }

      constexpr bool defers_final_block() const {
         return FINAL_BLOCK_STRATEGY == AlignmentBufferFinalBlock::must_be_deferred;
      }

   private:
      std::array<T, BLOCK_SIZE> m_buffer;
      size_t m_position;
};

}  // namespace Botan

#endif
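
To round out the picture, here is a minimal finalization sketch. This is an
illustration only, assuming the must_be_deferred strategy and a hypothetical
compress() routine in the calling algorithm; with that strategy the deferred
final block is still sitting in m_buffer when finalization begins:

   void finalize_sketch() {
      // Zero-pad a trailing partial block; this is a no-op if the deferred
      // final block is already full.
      m_buffer.fill_up_with_zeros();
      // Hand the final block to the (hypothetical) compression routine.
      compress(m_buffer.consume(), 1);
   }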