• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

cryspen / hacl-packages / 5808703668

pending completion
5808703668

Pull #418

github

web-flow
Merge 4abdd0203 into 1575f26e8
Pull Request #418: Add support for Hacl_AES_128_GCM_NI and Hacl_AES_128_GCM_M32

7433 of 7433 new or added lines in 12 files covered. (100.0%)

31975 of 62256 relevant lines covered (51.36%)

1238863.46 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/src/Hacl_Gf128_NI.c
1
/* MIT License
2
 *
3
 * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
4
 * Copyright (c) 2022-2023 HACL* Contributors
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in all
14
 * copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
 * SOFTWARE.
23
 */
24

25

26
#include "Hacl_Gf128_NI.h"
27

28
static inline void
29
fadd0(Lib_IntVector_Intrinsics_vec128 *x, Lib_IntVector_Intrinsics_vec128 *y)
30
{
×
31
  x[0U] = Lib_IntVector_Intrinsics_vec128_xor(x[0U], y[0U]);
×
32
}
×
33

34
/*
 * In-place multiplication in GF(2^128): x[0] = x[0] * y[0].
 *
 * Strategy: compute the full 256-bit carry-less product of the two 128-bit
 * operands from four 64x64-bit clmuls (imm8 selectors 0x00/0x10/0x01/0x11
 * pick the low/high 64-bit halves, PCLMULQDQ-style), then reduce back to
 * 128 bits.  The shift counts used in the reduction (left by 63/62/57,
 * right by 1/2/7) match reduction modulo the GHASH polynomial
 * x^128 + x^7 + x^2 + x + 1 in the bit-reflected representation GHASH
 * uses -- NOTE(review): polynomial identity inferred from the constants;
 * confirm against the F* source if in doubt.
 */
static inline void
fmul0(Lib_IntVector_Intrinsics_vec128 *x, Lib_IntVector_Intrinsics_vec128 *y)
{
  Lib_IntVector_Intrinsics_vec128 xe = x[0U];
  Lib_IntVector_Intrinsics_vec128 ye = y[0U];
  /* Four partial products: lo0 = xlo*ylo, m1 = xlo*yhi, m2 = xhi*ylo,
     hi = xhi*yhi. */
  Lib_IntVector_Intrinsics_vec128
  lo0 = Lib_IntVector_Intrinsics_ni_clmul(xe, ye, (uint8_t)0x00U);
  Lib_IntVector_Intrinsics_vec128 m1 = Lib_IntVector_Intrinsics_ni_clmul(xe, ye, (uint8_t)0x10U);
  Lib_IntVector_Intrinsics_vec128 m2 = Lib_IntVector_Intrinsics_ni_clmul(xe, ye, (uint8_t)0x01U);
  Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_ni_clmul(xe, ye, (uint8_t)0x11U);
  /* Fold the two cross products into the low and high 128-bit halves of
     the 256-bit product (xor is carry-free addition). */
  Lib_IntVector_Intrinsics_vec128 m11 = Lib_IntVector_Intrinsics_vec128_xor(m1, m2);
  Lib_IntVector_Intrinsics_vec128
  m21 = Lib_IntVector_Intrinsics_vec128_shift_left(m11, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  m12 = Lib_IntVector_Intrinsics_vec128_shift_right(m11, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 lo10 = Lib_IntVector_Intrinsics_vec128_xor(lo0, m21);
  Lib_IntVector_Intrinsics_vec128 hi10 = Lib_IntVector_Intrinsics_vec128_xor(hi, m12);
  Lib_IntVector_Intrinsics_vec128 hi0 = hi10;
  Lib_IntVector_Intrinsics_vec128 lo = lo10;
  /* Shift the whole 256-bit product left by one bit.  The per-64-bit-lane
     shifts (shift_left64/right64) lose cross-lane carries, so each lane's
     top bit is recovered explicitly via a 63-bit right shift and a
     128-bit lane move. */
  Lib_IntVector_Intrinsics_vec128
  lo1 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo2 = Lib_IntVector_Intrinsics_vec128_shift_left(lo1, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  lo3 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128 lo31 = Lib_IntVector_Intrinsics_vec128_xor(lo3, lo2);
  Lib_IntVector_Intrinsics_vec128
  hi1 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi0, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  hi11 = Lib_IntVector_Intrinsics_vec128_shift_left(hi1, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  hi2 = Lib_IntVector_Intrinsics_vec128_shift_left64(hi0, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128 hi21 = Lib_IntVector_Intrinsics_vec128_xor(hi2, hi11);
  /* Carry the top bit of the low half into the high half. */
  Lib_IntVector_Intrinsics_vec128
  lo11 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo12 = Lib_IntVector_Intrinsics_vec128_shift_right(lo11, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 hi22 = Lib_IntVector_Intrinsics_vec128_xor(hi21, lo12);
  Lib_IntVector_Intrinsics_vec128 lo4 = lo31;
  Lib_IntVector_Intrinsics_vec128 hi3 = hi22;
  /* Modular reduction, first step: multiply the low half by the low terms
     of the polynomial (shifts by 63, 62, 57). */
  Lib_IntVector_Intrinsics_vec128
  lo13 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo21 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)62U);
  Lib_IntVector_Intrinsics_vec128
  lo32 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)57U);
  Lib_IntVector_Intrinsics_vec128 lo14 = Lib_IntVector_Intrinsics_vec128_xor(lo13, lo21);
  Lib_IntVector_Intrinsics_vec128 lo15 = Lib_IntVector_Intrinsics_vec128_xor(lo14, lo32);
  Lib_IntVector_Intrinsics_vec128
  lo22 = Lib_IntVector_Intrinsics_vec128_shift_right(lo15, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  lo33 = Lib_IntVector_Intrinsics_vec128_shift_left(lo15, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 lo5 = Lib_IntVector_Intrinsics_vec128_xor(lo4, lo33);
  Lib_IntVector_Intrinsics_vec128 lo_ = lo22;
  /* Reduction, second step: fold back with right shifts by 1, 2 and 7,
     then xor the reduced low half against the (shifted) high half. */
  Lib_IntVector_Intrinsics_vec128
  lo16 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo5, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128
  lo23 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo5, (uint32_t)2U);
  Lib_IntVector_Intrinsics_vec128
  lo34 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo5, (uint32_t)7U);
  Lib_IntVector_Intrinsics_vec128 lo17 = Lib_IntVector_Intrinsics_vec128_xor(lo16, lo23);
  Lib_IntVector_Intrinsics_vec128 lo18 = Lib_IntVector_Intrinsics_vec128_xor(lo17, lo34);
  Lib_IntVector_Intrinsics_vec128 lo19 = Lib_IntVector_Intrinsics_vec128_xor(lo18, lo_);
  Lib_IntVector_Intrinsics_vec128 lo6 = Lib_IntVector_Intrinsics_vec128_xor(lo5, lo19);
  Lib_IntVector_Intrinsics_vec128 lo7 = Lib_IntVector_Intrinsics_vec128_xor(lo6, hi3);
  Lib_IntVector_Intrinsics_vec128 lo110 = lo7;
  x[0U] = lo110;
}
102

103
/* Load the 16-byte hash key (big-endian) and precompute its first four
   field powers for the 4-way update path.  Layout after this call:
   pre[3] = r (the loaded key), pre[2] = r*r, pre[1] = r^3, pre[0] = r^4,
   each successive power obtained with one fmul0. */
static inline void load_precompute_r(Lib_IntVector_Intrinsics_vec128 *pre, uint8_t *key)
{
  Lib_IntVector_Intrinsics_vec128 *pow4 = pre;
  Lib_IntVector_Intrinsics_vec128 *pow3 = pre + (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *pow2 = pre + (uint32_t)2U;
  Lib_IntVector_Intrinsics_vec128 *pow1 = pre + (uint32_t)3U;
  pow1[0U] = Lib_IntVector_Intrinsics_vec128_load_be(key);
  /* Seed every slot with r, then multiply each slot by the previous power. */
  pow2[0U] = pow1[0U];
  pow3[0U] = pow1[0U];
  pow4[0U] = pow1[0U];
  fmul0(pow2, pow1);
  fmul0(pow3, pow2);
  fmul0(pow4, pow3);
}
117

118
/*
 * 4-way aggregated GHASH step: acc[0] = x[0]*pre[0] ^ x[1]*pre[1]
 *                                     ^ x[2]*pre[2] ^ x[3]*pre[3].
 *
 * The four 128-bit blocks in `x` are multiplied against the four
 * precomputed key powers in `pre` (pre[0] = r^4 ... pre[3] = r, see
 * load_precompute_r).  All partial clmul products are xored together
 * FIRST, so only a single modular reduction is needed for the whole
 * 4-block batch -- the reduction sequence below is identical to the
 * tail of fmul0.
 */
static inline void
normalize4(
  Lib_IntVector_Intrinsics_vec128 *acc,
  Lib_IntVector_Intrinsics_vec128 *x,
  Lib_IntVector_Intrinsics_vec128 *pre
)
{
  Lib_IntVector_Intrinsics_vec128 x1 = x[0U];
  Lib_IntVector_Intrinsics_vec128 x2 = x[1U];
  Lib_IntVector_Intrinsics_vec128 x3 = x[2U];
  Lib_IntVector_Intrinsics_vec128 x4 = x[3U];
  Lib_IntVector_Intrinsics_vec128 y1 = pre[0U];
  Lib_IntVector_Intrinsics_vec128 y2 = pre[1U];
  Lib_IntVector_Intrinsics_vec128 y3 = pre[2U];
  Lib_IntVector_Intrinsics_vec128 y4 = pre[3U];
  /* lo parts (imm8 0x00): xlo*ylo for each of the four pairs, aggregated. */
  Lib_IntVector_Intrinsics_vec128
  lo10 = Lib_IntVector_Intrinsics_ni_clmul(x1, y1, (uint8_t)0x00U);
  Lib_IntVector_Intrinsics_vec128
  lo2 = Lib_IntVector_Intrinsics_ni_clmul(x2, y2, (uint8_t)0x00U);
  Lib_IntVector_Intrinsics_vec128
  lo30 = Lib_IntVector_Intrinsics_ni_clmul(x3, y3, (uint8_t)0x00U);
  Lib_IntVector_Intrinsics_vec128
  lo40 = Lib_IntVector_Intrinsics_ni_clmul(x4, y4, (uint8_t)0x00U);
  Lib_IntVector_Intrinsics_vec128 lo0 = Lib_IntVector_Intrinsics_vec128_xor(lo10, lo2);
  Lib_IntVector_Intrinsics_vec128 lo5 = Lib_IntVector_Intrinsics_vec128_xor(lo0, lo30);
  Lib_IntVector_Intrinsics_vec128 lo6 = Lib_IntVector_Intrinsics_vec128_xor(lo5, lo40);
  /* cross parts (imm8 0x10 and 0x01), all eight aggregated into m10. */
  Lib_IntVector_Intrinsics_vec128 m1 = Lib_IntVector_Intrinsics_ni_clmul(x1, y1, (uint8_t)0x10U);
  Lib_IntVector_Intrinsics_vec128 m2 = Lib_IntVector_Intrinsics_ni_clmul(x2, y2, (uint8_t)0x10U);
  Lib_IntVector_Intrinsics_vec128 m3 = Lib_IntVector_Intrinsics_ni_clmul(x3, y3, (uint8_t)0x10U);
  Lib_IntVector_Intrinsics_vec128 m4 = Lib_IntVector_Intrinsics_ni_clmul(x4, y4, (uint8_t)0x10U);
  Lib_IntVector_Intrinsics_vec128 m = Lib_IntVector_Intrinsics_vec128_xor(m1, m2);
  Lib_IntVector_Intrinsics_vec128 m5 = Lib_IntVector_Intrinsics_vec128_xor(m, m3);
  Lib_IntVector_Intrinsics_vec128 m6 = Lib_IntVector_Intrinsics_vec128_xor(m5, m4);
  Lib_IntVector_Intrinsics_vec128
  m11 = Lib_IntVector_Intrinsics_ni_clmul(x1, y1, (uint8_t)0x01U);
  Lib_IntVector_Intrinsics_vec128
  m21 = Lib_IntVector_Intrinsics_ni_clmul(x2, y2, (uint8_t)0x01U);
  Lib_IntVector_Intrinsics_vec128
  m31 = Lib_IntVector_Intrinsics_ni_clmul(x3, y3, (uint8_t)0x01U);
  Lib_IntVector_Intrinsics_vec128
  m41 = Lib_IntVector_Intrinsics_ni_clmul(x4, y4, (uint8_t)0x01U);
  Lib_IntVector_Intrinsics_vec128 m7 = Lib_IntVector_Intrinsics_vec128_xor(m6, m11);
  Lib_IntVector_Intrinsics_vec128 m8 = Lib_IntVector_Intrinsics_vec128_xor(m7, m21);
  Lib_IntVector_Intrinsics_vec128 m9 = Lib_IntVector_Intrinsics_vec128_xor(m8, m31);
  Lib_IntVector_Intrinsics_vec128 m10 = Lib_IntVector_Intrinsics_vec128_xor(m9, m41);
  /* hi parts (imm8 0x11): xhi*yhi for each pair, aggregated. */
  Lib_IntVector_Intrinsics_vec128
  hi10 = Lib_IntVector_Intrinsics_ni_clmul(x1, y1, (uint8_t)0x11U);
  Lib_IntVector_Intrinsics_vec128
  hi20 = Lib_IntVector_Intrinsics_ni_clmul(x2, y2, (uint8_t)0x11U);
  Lib_IntVector_Intrinsics_vec128
  hi30 = Lib_IntVector_Intrinsics_ni_clmul(x3, y3, (uint8_t)0x11U);
  Lib_IntVector_Intrinsics_vec128
  hi4 = Lib_IntVector_Intrinsics_ni_clmul(x4, y4, (uint8_t)0x11U);
  Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_xor(hi10, hi20);
  Lib_IntVector_Intrinsics_vec128 hi5 = Lib_IntVector_Intrinsics_vec128_xor(hi, hi30);
  Lib_IntVector_Intrinsics_vec128 hi6 = Lib_IntVector_Intrinsics_vec128_xor(hi5, hi4);
  /* Fold the aggregated cross product into the lo/hi 128-bit halves. */
  Lib_IntVector_Intrinsics_vec128
  m12 = Lib_IntVector_Intrinsics_vec128_shift_left(m10, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  m22 = Lib_IntVector_Intrinsics_vec128_shift_right(m10, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 lo7 = Lib_IntVector_Intrinsics_vec128_xor(lo6, m12);
  Lib_IntVector_Intrinsics_vec128 hi7 = Lib_IntVector_Intrinsics_vec128_xor(hi6, m22);
  Lib_IntVector_Intrinsics_vec128 hi0 = hi7;
  Lib_IntVector_Intrinsics_vec128 lo = lo7;
  /* From here on: the same one-bit shift and modular reduction as fmul0. */
  Lib_IntVector_Intrinsics_vec128
  lo1 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo20 = Lib_IntVector_Intrinsics_vec128_shift_left(lo1, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  lo3 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128 lo31 = Lib_IntVector_Intrinsics_vec128_xor(lo3, lo20);
  Lib_IntVector_Intrinsics_vec128
  hi1 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi0, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  hi11 = Lib_IntVector_Intrinsics_vec128_shift_left(hi1, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  hi2 = Lib_IntVector_Intrinsics_vec128_shift_left64(hi0, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128 hi21 = Lib_IntVector_Intrinsics_vec128_xor(hi2, hi11);
  Lib_IntVector_Intrinsics_vec128
  lo11 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo12 = Lib_IntVector_Intrinsics_vec128_shift_right(lo11, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 hi22 = Lib_IntVector_Intrinsics_vec128_xor(hi21, lo12);
  Lib_IntVector_Intrinsics_vec128 lo4 = lo31;
  Lib_IntVector_Intrinsics_vec128 hi3 = hi22;
  Lib_IntVector_Intrinsics_vec128
  lo13 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)63U);
  Lib_IntVector_Intrinsics_vec128
  lo21 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)62U);
  Lib_IntVector_Intrinsics_vec128
  lo32 = Lib_IntVector_Intrinsics_vec128_shift_left64(lo4, (uint32_t)57U);
  Lib_IntVector_Intrinsics_vec128 lo14 = Lib_IntVector_Intrinsics_vec128_xor(lo13, lo21);
  Lib_IntVector_Intrinsics_vec128 lo15 = Lib_IntVector_Intrinsics_vec128_xor(lo14, lo32);
  Lib_IntVector_Intrinsics_vec128
  lo22 = Lib_IntVector_Intrinsics_vec128_shift_right(lo15, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128
  lo33 = Lib_IntVector_Intrinsics_vec128_shift_left(lo15, (uint32_t)64U);
  Lib_IntVector_Intrinsics_vec128 lo50 = Lib_IntVector_Intrinsics_vec128_xor(lo4, lo33);
  Lib_IntVector_Intrinsics_vec128 lo_ = lo22;
  Lib_IntVector_Intrinsics_vec128
  lo16 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo50, (uint32_t)1U);
  Lib_IntVector_Intrinsics_vec128
  lo23 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo50, (uint32_t)2U);
  Lib_IntVector_Intrinsics_vec128
  lo34 = Lib_IntVector_Intrinsics_vec128_shift_right64(lo50, (uint32_t)7U);
  Lib_IntVector_Intrinsics_vec128 lo17 = Lib_IntVector_Intrinsics_vec128_xor(lo16, lo23);
  Lib_IntVector_Intrinsics_vec128 lo18 = Lib_IntVector_Intrinsics_vec128_xor(lo17, lo34);
  Lib_IntVector_Intrinsics_vec128 lo19 = Lib_IntVector_Intrinsics_vec128_xor(lo18, lo_);
  Lib_IntVector_Intrinsics_vec128 lo60 = Lib_IntVector_Intrinsics_vec128_xor(lo50, lo19);
  Lib_IntVector_Intrinsics_vec128 lo70 = Lib_IntVector_Intrinsics_vec128_xor(lo60, hi3);
  Lib_IntVector_Intrinsics_vec128 lo110 = lo70;
  acc[0U] = lo110;
}
231

232
/* Initialize a GHASH context.  ctx layout: ctx[0] = accumulator (zeroed
   here), ctx[1..4] = precomputed key powers filled by load_precompute_r. */
void Hacl_Gf128_NI_gcm_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
{
  Lib_IntVector_Intrinsics_vec128 *accumulator = ctx;
  Lib_IntVector_Intrinsics_vec128 *precomp = ctx + (uint32_t)1U;
  accumulator[0U] = Lib_IntVector_Intrinsics_vec128_zero;
  load_precompute_r(precomp, key);
}
239

240
void
241
Hacl_Gf128_NI_gcm_update_blocks(
242
  Lib_IntVector_Intrinsics_vec128 *ctx,
243
  uint32_t len,
244
  uint8_t *text
245
)
246
{
×
247
  Lib_IntVector_Intrinsics_vec128 *acc = ctx;
×
248
  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)1U;
×
249
  uint32_t len0 = len / (uint32_t)64U * (uint32_t)64U;
×
250
  uint8_t *t0 = text;
×
251
  if (len0 > (uint32_t)0U)
×
252
  {
×
253
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 f[4U] KRML_POST_ALIGN(16) = { 0U };
×
254
    Lib_IntVector_Intrinsics_vec128 *b4 = f;
×
255
    uint32_t nb = len0 / (uint32_t)64U;
×
256
    for (uint32_t i = (uint32_t)0U; i < nb; i++)
×
257
    {
×
258
      uint8_t *tb = t0 + i * (uint32_t)64U;
×
259
      b4[0U] = Lib_IntVector_Intrinsics_vec128_load_be(tb);
×
260
      b4[1U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)16U);
×
261
      b4[2U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)32U);
×
262
      b4[3U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)48U);
×
263
      b4[0U] = Lib_IntVector_Intrinsics_vec128_xor(acc[0U], b4[0U]);
×
264
      normalize4(acc, b4, pre);
×
265
    }
×
266
  }
×
267
  uint32_t len1 = len - len0;
×
268
  uint8_t *t1 = text + len0;
×
269
  Lib_IntVector_Intrinsics_vec128 *r1 = pre + (uint32_t)3U;
×
270
  uint32_t nb = len1 / (uint32_t)16U;
×
271
  uint32_t rem = len1 % (uint32_t)16U;
×
272
  for (uint32_t i = (uint32_t)0U; i < nb; i++)
×
273
  {
×
274
    uint8_t *tb = t1 + i * (uint32_t)16U;
×
275
    Lib_IntVector_Intrinsics_vec128 elem = Lib_IntVector_Intrinsics_vec128_zero;
×
276
    elem = Lib_IntVector_Intrinsics_vec128_load_be(tb);
×
277
    fadd0(acc, &elem);
×
278
    fmul0(acc, r1);
×
279
  }
×
280
  if (rem > (uint32_t)0U)
×
281
  {
×
282
    uint8_t *last = t1 + nb * (uint32_t)16U;
×
283
    Lib_IntVector_Intrinsics_vec128 elem = Lib_IntVector_Intrinsics_vec128_zero;
×
284
    uint8_t b[16U] = { 0U };
×
285
    memcpy(b, last, rem * sizeof (uint8_t));
×
286
    elem = Lib_IntVector_Intrinsics_vec128_load_be(b);
×
287
    fadd0(acc, &elem);
×
288
    fmul0(acc, r1);
×
289
    return;
×
290
  }
×
291
}
×
292

293
/* Padded update entry point: in this implementation it is a function-pointer
   alias for Hacl_Gf128_NI_gcm_update_blocks, which already zero-pads a
   trailing partial block before absorbing it. */
void
(*Hacl_Gf128_NI_gcm_update_padded)(
  Lib_IntVector_Intrinsics_vec128 *x0,
  uint32_t x1,
  uint8_t *x2
) = Hacl_Gf128_NI_gcm_update_blocks;
299

300
/* Serialize the 16-byte authentication value: store the accumulator
   (ctx[0]) into `tag` in big-endian byte order. */
void Hacl_Gf128_NI_gcm_emit(uint8_t *tag, Lib_IntVector_Intrinsics_vec128 *ctx)
{
  Lib_IntVector_Intrinsics_vec128_store_be(tag, ctx[0U]);
}
305

306
/*
 * One-shot GHASH: hash `len` bytes of `text` under the 16-byte `key` and
 * write the 16-byte result to `tag`.
 *
 * Equivalent to gcm_init + gcm_update_blocks + gcm_emit on a stack-local
 * context (the three phases are inlined below by the code generator).
 */
void Hacl_Gf128_NI_ghash(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
{
  /* ctx[0] = accumulator, ctx[1..4] = precomputed key powers. */
  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[5U] KRML_POST_ALIGN(16) = { 0U };
  Lib_IntVector_Intrinsics_vec128 *acc = ctx;
  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)1U;
  acc[0U] = Lib_IntVector_Intrinsics_vec128_zero;
  load_precompute_r(pre0, key);
  Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)1U;
  /* 4-way path over the largest multiple of 64 bytes. */
  uint32_t len0 = len / (uint32_t)64U * (uint32_t)64U;
  uint8_t *t0 = text;
  if (len0 > (uint32_t)0U)
  {
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 f[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *b4 = f;
    uint32_t nb = len0 / (uint32_t)64U;
    for (uint32_t i = (uint32_t)0U; i < nb; i++)
    {
      uint8_t *tb = t0 + i * (uint32_t)64U;
      b4[0U] = Lib_IntVector_Intrinsics_vec128_load_be(tb);
      b4[1U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)16U);
      b4[2U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)32U);
      b4[3U] = Lib_IntVector_Intrinsics_vec128_load_be(tb + (uint32_t)48U);
      /* Fold the accumulator into the first of the four blocks, then do
         one aggregated multiply-reduce over the batch. */
      b4[0U] = Lib_IntVector_Intrinsics_vec128_xor(acc0[0U], b4[0U]);
      normalize4(acc0, b4, pre);
    }
  }
  /* Remaining whole 16-byte blocks, one at a time. */
  uint32_t len1 = len - len0;
  uint8_t *t1 = text + len0;
  Lib_IntVector_Intrinsics_vec128 *r1 = pre + (uint32_t)3U;
  uint32_t nb = len1 / (uint32_t)16U;
  uint32_t rem = len1 % (uint32_t)16U;
  for (uint32_t i = (uint32_t)0U; i < nb; i++)
  {
    uint8_t *tb = t1 + i * (uint32_t)16U;
    Lib_IntVector_Intrinsics_vec128 elem = Lib_IntVector_Intrinsics_vec128_zero;
    elem = Lib_IntVector_Intrinsics_vec128_load_be(tb);
    fadd0(acc0, &elem);
    fmul0(acc0, r1);
  }
  /* Trailing partial block: zero-pad to 16 bytes and absorb. */
  if (rem > (uint32_t)0U)
  {
    uint8_t *last = t1 + nb * (uint32_t)16U;
    Lib_IntVector_Intrinsics_vec128 elem = Lib_IntVector_Intrinsics_vec128_zero;
    uint8_t b[16U] = { 0U };
    memcpy(b, last, rem * sizeof (uint8_t));
    elem = Lib_IntVector_Intrinsics_vec128_load_be(b);
    fadd0(acc0, &elem);
    fmul0(acc0, r1);
  }
  /* Emit the accumulator as the big-endian tag. */
  Lib_IntVector_Intrinsics_vec128 *acc1 = ctx;
  Lib_IntVector_Intrinsics_vec128_store_be(tag, acc1[0U]);
}
359

STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc