cryspen / hacl-packages / build 5808703668 (pending completion)

Pull Request #418: Add support for Hacl_AES_128_GCM_NI and Hacl_AES_128_GCM_M32
Merge 4abdd0203 into 1575f26e8 (committed via GitHub web-flow)

7433 of 7433 new or added lines in 12 files covered. (100.0%)
31975 of 62256 relevant lines covered (51.36%)
1238863.46 hits per line

Source File: /src/Hacl_AES_128_CTR32_NI.c (0.0% of relevant lines covered)
/* MIT License
 *
 * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
 * Copyright (c) 2022-2023 HACL* Contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "Hacl_AES_128_CTR32_NI.h"

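/* Initialize the CTR32 context: ctx[0U] holds the nonce/counter block and
   ctx[1U..11U] hold the eleven expanded round keys. The 16-byte key is
   expanded with the AES-NI key-generation schedule (round constants 0x01
   through 0x36), and the 12-byte nonce is zero-padded into a 16-byte block. */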
void
Hacl_AES_128_CTR32_NI_aes128_init(
  Lib_IntVector_Intrinsics_vec128 *ctx,
  uint8_t *key,
  uint8_t *nonce
)
{
  Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *n = ctx;
  uint32_t klen = (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *uu____0 = kex;
  uu____0[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(key);
  Lib_IntVector_Intrinsics_vec128 *prev = kex;
  Lib_IntVector_Intrinsics_vec128 *next = kex + klen;
  Lib_IntVector_Intrinsics_vec128
  v0 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev[0U], (uint8_t)0x01U);
  next[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v0,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key1 = prev[0U];
  Lib_IntVector_Intrinsics_vec128
  key2 =
    Lib_IntVector_Intrinsics_vec128_xor(key1,
      Lib_IntVector_Intrinsics_vec128_shift_left(key1, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key3 =
    Lib_IntVector_Intrinsics_vec128_xor(key2,
      Lib_IntVector_Intrinsics_vec128_shift_left(key2, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key4 =
    Lib_IntVector_Intrinsics_vec128_xor(key3,
      Lib_IntVector_Intrinsics_vec128_shift_left(key3, (uint32_t)32U));
  next[0U] = Lib_IntVector_Intrinsics_vec128_xor(next[0U], key4);
  Lib_IntVector_Intrinsics_vec128 *prev1 = kex + klen;
  Lib_IntVector_Intrinsics_vec128 *next1 = kex + (uint32_t)2U * klen;
  Lib_IntVector_Intrinsics_vec128
  v1 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev1[0U], (uint8_t)0x02U);
  next1[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v1,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key10 = prev1[0U];
  Lib_IntVector_Intrinsics_vec128
  key20 =
    Lib_IntVector_Intrinsics_vec128_xor(key10,
      Lib_IntVector_Intrinsics_vec128_shift_left(key10, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key30 =
    Lib_IntVector_Intrinsics_vec128_xor(key20,
      Lib_IntVector_Intrinsics_vec128_shift_left(key20, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key40 =
    Lib_IntVector_Intrinsics_vec128_xor(key30,
      Lib_IntVector_Intrinsics_vec128_shift_left(key30, (uint32_t)32U));
  next1[0U] = Lib_IntVector_Intrinsics_vec128_xor(next1[0U], key40);
  Lib_IntVector_Intrinsics_vec128 *prev2 = kex + klen * (uint32_t)2U;
  Lib_IntVector_Intrinsics_vec128 *next2 = kex + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128
  v2 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev2[0U], (uint8_t)0x04U);
  next2[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v2,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key11 = prev2[0U];
  Lib_IntVector_Intrinsics_vec128
  key21 =
    Lib_IntVector_Intrinsics_vec128_xor(key11,
      Lib_IntVector_Intrinsics_vec128_shift_left(key11, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key31 =
    Lib_IntVector_Intrinsics_vec128_xor(key21,
      Lib_IntVector_Intrinsics_vec128_shift_left(key21, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key41 =
    Lib_IntVector_Intrinsics_vec128_xor(key31,
      Lib_IntVector_Intrinsics_vec128_shift_left(key31, (uint32_t)32U));
  next2[0U] = Lib_IntVector_Intrinsics_vec128_xor(next2[0U], key41);
  Lib_IntVector_Intrinsics_vec128 *prev3 = kex + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128 *next3 = kex + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128
  v3 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev3[0U], (uint8_t)0x08U);
  next3[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v3,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key12 = prev3[0U];
  Lib_IntVector_Intrinsics_vec128
  key22 =
    Lib_IntVector_Intrinsics_vec128_xor(key12,
      Lib_IntVector_Intrinsics_vec128_shift_left(key12, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key32 =
    Lib_IntVector_Intrinsics_vec128_xor(key22,
      Lib_IntVector_Intrinsics_vec128_shift_left(key22, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key42 =
    Lib_IntVector_Intrinsics_vec128_xor(key32,
      Lib_IntVector_Intrinsics_vec128_shift_left(key32, (uint32_t)32U));
  next3[0U] = Lib_IntVector_Intrinsics_vec128_xor(next3[0U], key42);
  Lib_IntVector_Intrinsics_vec128 *prev4 = kex + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128 *next4 = kex + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128
  v4 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev4[0U], (uint8_t)0x10U);
  next4[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v4,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key13 = prev4[0U];
  Lib_IntVector_Intrinsics_vec128
  key23 =
    Lib_IntVector_Intrinsics_vec128_xor(key13,
      Lib_IntVector_Intrinsics_vec128_shift_left(key13, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key33 =
    Lib_IntVector_Intrinsics_vec128_xor(key23,
      Lib_IntVector_Intrinsics_vec128_shift_left(key23, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key43 =
    Lib_IntVector_Intrinsics_vec128_xor(key33,
      Lib_IntVector_Intrinsics_vec128_shift_left(key33, (uint32_t)32U));
  next4[0U] = Lib_IntVector_Intrinsics_vec128_xor(next4[0U], key43);
  Lib_IntVector_Intrinsics_vec128 *prev5 = kex + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128 *next5 = kex + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128
  v5 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev5[0U], (uint8_t)0x20U);
  next5[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v5,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key14 = prev5[0U];
  Lib_IntVector_Intrinsics_vec128
  key24 =
    Lib_IntVector_Intrinsics_vec128_xor(key14,
      Lib_IntVector_Intrinsics_vec128_shift_left(key14, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key34 =
    Lib_IntVector_Intrinsics_vec128_xor(key24,
      Lib_IntVector_Intrinsics_vec128_shift_left(key24, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key44 =
    Lib_IntVector_Intrinsics_vec128_xor(key34,
      Lib_IntVector_Intrinsics_vec128_shift_left(key34, (uint32_t)32U));
  next5[0U] = Lib_IntVector_Intrinsics_vec128_xor(next5[0U], key44);
  Lib_IntVector_Intrinsics_vec128 *prev6 = kex + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128 *next6 = kex + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128
  v6 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev6[0U], (uint8_t)0x40U);
  next6[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v6,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key15 = prev6[0U];
  Lib_IntVector_Intrinsics_vec128
  key25 =
    Lib_IntVector_Intrinsics_vec128_xor(key15,
      Lib_IntVector_Intrinsics_vec128_shift_left(key15, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key35 =
    Lib_IntVector_Intrinsics_vec128_xor(key25,
      Lib_IntVector_Intrinsics_vec128_shift_left(key25, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key45 =
    Lib_IntVector_Intrinsics_vec128_xor(key35,
      Lib_IntVector_Intrinsics_vec128_shift_left(key35, (uint32_t)32U));
  next6[0U] = Lib_IntVector_Intrinsics_vec128_xor(next6[0U], key45);
  Lib_IntVector_Intrinsics_vec128 *prev7 = kex + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128 *next7 = kex + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128
  v7 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev7[0U], (uint8_t)0x80U);
  next7[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v7,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key16 = prev7[0U];
  Lib_IntVector_Intrinsics_vec128
  key26 =
    Lib_IntVector_Intrinsics_vec128_xor(key16,
      Lib_IntVector_Intrinsics_vec128_shift_left(key16, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key36 =
    Lib_IntVector_Intrinsics_vec128_xor(key26,
      Lib_IntVector_Intrinsics_vec128_shift_left(key26, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key46 =
    Lib_IntVector_Intrinsics_vec128_xor(key36,
      Lib_IntVector_Intrinsics_vec128_shift_left(key36, (uint32_t)32U));
  next7[0U] = Lib_IntVector_Intrinsics_vec128_xor(next7[0U], key46);
  Lib_IntVector_Intrinsics_vec128 *prev8 = kex + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128 *next8 = kex + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128
  v8 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev8[0U], (uint8_t)0x1bU);
  next8[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v8,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key17 = prev8[0U];
  Lib_IntVector_Intrinsics_vec128
  key27 =
    Lib_IntVector_Intrinsics_vec128_xor(key17,
      Lib_IntVector_Intrinsics_vec128_shift_left(key17, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key37 =
    Lib_IntVector_Intrinsics_vec128_xor(key27,
      Lib_IntVector_Intrinsics_vec128_shift_left(key27, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key47 =
    Lib_IntVector_Intrinsics_vec128_xor(key37,
      Lib_IntVector_Intrinsics_vec128_shift_left(key37, (uint32_t)32U));
  next8[0U] = Lib_IntVector_Intrinsics_vec128_xor(next8[0U], key47);
  Lib_IntVector_Intrinsics_vec128 *prev9 = kex + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128 *next9 = kex + klen * (uint32_t)10U;
  Lib_IntVector_Intrinsics_vec128
  v = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev9[0U], (uint8_t)0x36U);
  next9[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key18 = prev9[0U];
  Lib_IntVector_Intrinsics_vec128
  key28 =
    Lib_IntVector_Intrinsics_vec128_xor(key18,
      Lib_IntVector_Intrinsics_vec128_shift_left(key18, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key38 =
    Lib_IntVector_Intrinsics_vec128_xor(key28,
      Lib_IntVector_Intrinsics_vec128_shift_left(key28, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key48 =
    Lib_IntVector_Intrinsics_vec128_xor(key38,
      Lib_IntVector_Intrinsics_vec128_shift_left(key38, (uint32_t)32U));
  next9[0U] = Lib_IntVector_Intrinsics_vec128_xor(next9[0U], key48);
  uint8_t nb[16U] = { 0U };
  memcpy(nb, nonce, (uint32_t)12U * sizeof (uint8_t));
  n[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(nb);
}

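/* Replace the 12-byte nonce in ctx[0U] (zero-padded to 16 bytes) without
   touching the expanded round keys. */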
void
Hacl_AES_128_CTR32_NI_aes128_set_nonce(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *nonce)
{
  Lib_IntVector_Intrinsics_vec128 *n = ctx;
  uint8_t nb[16U] = { 0U };
  memcpy(nb, nonce, (uint32_t)12U * sizeof (uint8_t));
  n[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(nb);
}

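/* Write to kb the 16-byte keystream block for the given 32-bit counter.
   Four consecutive counter blocks are encrypted in parallel, but only
   st[0U], the block for `counter` itself, is stored. */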
void
Hacl_AES_128_CTR32_NI_aes128_key_block(
  uint8_t *kb,
  Lib_IntVector_Intrinsics_vec128 *ctx,
  uint32_t counter
)
{
  Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *n = ctx;
  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
  uint32_t counter0 = htobe32(counter);
  uint32_t counter1 = htobe32(counter + (uint32_t)1U);
  uint32_t counter2 = htobe32(counter + (uint32_t)2U);
  uint32_t counter3 = htobe32(counter + (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 nonce0 = n[0U];
  st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
  st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
  st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
  st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
  uint32_t klen = (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *k0 = kex;
  Lib_IntVector_Intrinsics_vec128 *kr = kex + klen;
  Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen;
  st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
  st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
  st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
  st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
  KRML_MAYBE_FOR9(i,
    (uint32_t)0U,
    (uint32_t)9U,
    (uint32_t)1U,
    Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i * (uint32_t)1U;
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
  st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
  st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
  st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
  st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
  Lib_IntVector_Intrinsics_vec128_store128_le(kb, st[0U]);
}

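/* CTR-mode encryption: expand k and load n into a stack-local context, then
   XOR the input with the AES keystream, four 16-byte blocks (64 bytes) per
   iteration. The 32-bit big-endian counter starts at c and advances by 4 per
   64-byte chunk; a partial final chunk goes through a zero-padded 64-byte
   staging buffer so loads and stores stay full-width. */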
inline void
Hacl_AES_128_CTR32_NI_aes128_ctr_encrypt(
  uint32_t len,
  uint8_t *out,
  uint8_t *inp,
  uint8_t *k,
  uint8_t *n,
  uint32_t c
)
{
  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[12U] KRML_POST_ALIGN(16) = { 0U };
  Lib_IntVector_Intrinsics_vec128 *kex0 = ctx + (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *n10 = ctx;
  uint32_t klen = (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *uu____0 = kex0;
  uu____0[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(k);
  Lib_IntVector_Intrinsics_vec128 *prev = kex0;
  Lib_IntVector_Intrinsics_vec128 *next = kex0 + klen;
  Lib_IntVector_Intrinsics_vec128
  v0 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev[0U], (uint8_t)0x01U);
  next[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v0,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key = prev[0U];
  Lib_IntVector_Intrinsics_vec128
  key1 =
    Lib_IntVector_Intrinsics_vec128_xor(key,
      Lib_IntVector_Intrinsics_vec128_shift_left(key, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key2 =
    Lib_IntVector_Intrinsics_vec128_xor(key1,
      Lib_IntVector_Intrinsics_vec128_shift_left(key1, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key3 =
    Lib_IntVector_Intrinsics_vec128_xor(key2,
      Lib_IntVector_Intrinsics_vec128_shift_left(key2, (uint32_t)32U));
  next[0U] = Lib_IntVector_Intrinsics_vec128_xor(next[0U], key3);
  Lib_IntVector_Intrinsics_vec128 *prev1 = kex0 + klen;
  Lib_IntVector_Intrinsics_vec128 *next1 = kex0 + (uint32_t)2U * klen;
  Lib_IntVector_Intrinsics_vec128
  v4 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev1[0U], (uint8_t)0x02U);
  next1[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v4,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key0 = prev1[0U];
  Lib_IntVector_Intrinsics_vec128
  key10 =
    Lib_IntVector_Intrinsics_vec128_xor(key0,
      Lib_IntVector_Intrinsics_vec128_shift_left(key0, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key20 =
    Lib_IntVector_Intrinsics_vec128_xor(key10,
      Lib_IntVector_Intrinsics_vec128_shift_left(key10, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key30 =
    Lib_IntVector_Intrinsics_vec128_xor(key20,
      Lib_IntVector_Intrinsics_vec128_shift_left(key20, (uint32_t)32U));
  next1[0U] = Lib_IntVector_Intrinsics_vec128_xor(next1[0U], key30);
  Lib_IntVector_Intrinsics_vec128 *prev2 = kex0 + klen * (uint32_t)2U;
  Lib_IntVector_Intrinsics_vec128 *next2 = kex0 + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128
  v5 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev2[0U], (uint8_t)0x04U);
  next2[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v5,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key4 = prev2[0U];
  Lib_IntVector_Intrinsics_vec128
  key11 =
    Lib_IntVector_Intrinsics_vec128_xor(key4,
      Lib_IntVector_Intrinsics_vec128_shift_left(key4, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key21 =
    Lib_IntVector_Intrinsics_vec128_xor(key11,
      Lib_IntVector_Intrinsics_vec128_shift_left(key11, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key31 =
    Lib_IntVector_Intrinsics_vec128_xor(key21,
      Lib_IntVector_Intrinsics_vec128_shift_left(key21, (uint32_t)32U));
  next2[0U] = Lib_IntVector_Intrinsics_vec128_xor(next2[0U], key31);
  Lib_IntVector_Intrinsics_vec128 *prev3 = kex0 + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128 *next3 = kex0 + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128
  v6 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev3[0U], (uint8_t)0x08U);
  next3[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v6,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key5 = prev3[0U];
  Lib_IntVector_Intrinsics_vec128
  key12 =
    Lib_IntVector_Intrinsics_vec128_xor(key5,
      Lib_IntVector_Intrinsics_vec128_shift_left(key5, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key22 =
    Lib_IntVector_Intrinsics_vec128_xor(key12,
      Lib_IntVector_Intrinsics_vec128_shift_left(key12, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key32 =
    Lib_IntVector_Intrinsics_vec128_xor(key22,
      Lib_IntVector_Intrinsics_vec128_shift_left(key22, (uint32_t)32U));
  next3[0U] = Lib_IntVector_Intrinsics_vec128_xor(next3[0U], key32);
  Lib_IntVector_Intrinsics_vec128 *prev4 = kex0 + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128 *next4 = kex0 + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128
  v7 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev4[0U], (uint8_t)0x10U);
  next4[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v7,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key6 = prev4[0U];
  Lib_IntVector_Intrinsics_vec128
  key13 =
    Lib_IntVector_Intrinsics_vec128_xor(key6,
      Lib_IntVector_Intrinsics_vec128_shift_left(key6, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key23 =
    Lib_IntVector_Intrinsics_vec128_xor(key13,
      Lib_IntVector_Intrinsics_vec128_shift_left(key13, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key33 =
    Lib_IntVector_Intrinsics_vec128_xor(key23,
      Lib_IntVector_Intrinsics_vec128_shift_left(key23, (uint32_t)32U));
  next4[0U] = Lib_IntVector_Intrinsics_vec128_xor(next4[0U], key33);
  Lib_IntVector_Intrinsics_vec128 *prev5 = kex0 + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128 *next5 = kex0 + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128
  v8 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev5[0U], (uint8_t)0x20U);
  next5[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v8,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key7 = prev5[0U];
  Lib_IntVector_Intrinsics_vec128
  key14 =
    Lib_IntVector_Intrinsics_vec128_xor(key7,
      Lib_IntVector_Intrinsics_vec128_shift_left(key7, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key24 =
    Lib_IntVector_Intrinsics_vec128_xor(key14,
      Lib_IntVector_Intrinsics_vec128_shift_left(key14, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key34 =
    Lib_IntVector_Intrinsics_vec128_xor(key24,
      Lib_IntVector_Intrinsics_vec128_shift_left(key24, (uint32_t)32U));
  next5[0U] = Lib_IntVector_Intrinsics_vec128_xor(next5[0U], key34);
  Lib_IntVector_Intrinsics_vec128 *prev6 = kex0 + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128 *next6 = kex0 + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128
  v9 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev6[0U], (uint8_t)0x40U);
  next6[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v9,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key8 = prev6[0U];
  Lib_IntVector_Intrinsics_vec128
  key15 =
    Lib_IntVector_Intrinsics_vec128_xor(key8,
      Lib_IntVector_Intrinsics_vec128_shift_left(key8, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key25 =
    Lib_IntVector_Intrinsics_vec128_xor(key15,
      Lib_IntVector_Intrinsics_vec128_shift_left(key15, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key35 =
    Lib_IntVector_Intrinsics_vec128_xor(key25,
      Lib_IntVector_Intrinsics_vec128_shift_left(key25, (uint32_t)32U));
  next6[0U] = Lib_IntVector_Intrinsics_vec128_xor(next6[0U], key35);
  Lib_IntVector_Intrinsics_vec128 *prev7 = kex0 + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128 *next7 = kex0 + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128
  v10 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev7[0U], (uint8_t)0x80U);
  next7[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v10,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key9 = prev7[0U];
  Lib_IntVector_Intrinsics_vec128
  key16 =
    Lib_IntVector_Intrinsics_vec128_xor(key9,
      Lib_IntVector_Intrinsics_vec128_shift_left(key9, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key26 =
    Lib_IntVector_Intrinsics_vec128_xor(key16,
      Lib_IntVector_Intrinsics_vec128_shift_left(key16, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key36 =
    Lib_IntVector_Intrinsics_vec128_xor(key26,
      Lib_IntVector_Intrinsics_vec128_shift_left(key26, (uint32_t)32U));
  next7[0U] = Lib_IntVector_Intrinsics_vec128_xor(next7[0U], key36);
  Lib_IntVector_Intrinsics_vec128 *prev8 = kex0 + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128 *next8 = kex0 + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128
  v12 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev8[0U], (uint8_t)0x1bU);
  next8[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v12,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key17 = prev8[0U];
  Lib_IntVector_Intrinsics_vec128
  key18 =
    Lib_IntVector_Intrinsics_vec128_xor(key17,
      Lib_IntVector_Intrinsics_vec128_shift_left(key17, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key27 =
    Lib_IntVector_Intrinsics_vec128_xor(key18,
      Lib_IntVector_Intrinsics_vec128_shift_left(key18, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key37 =
    Lib_IntVector_Intrinsics_vec128_xor(key27,
      Lib_IntVector_Intrinsics_vec128_shift_left(key27, (uint32_t)32U));
  next8[0U] = Lib_IntVector_Intrinsics_vec128_xor(next8[0U], key37);
  Lib_IntVector_Intrinsics_vec128 *prev9 = kex0 + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128 *next9 = kex0 + klen * (uint32_t)10U;
  Lib_IntVector_Intrinsics_vec128
  v = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev9[0U], (uint8_t)0x36U);
  next9[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key19 = prev9[0U];
  Lib_IntVector_Intrinsics_vec128
  key110 =
    Lib_IntVector_Intrinsics_vec128_xor(key19,
      Lib_IntVector_Intrinsics_vec128_shift_left(key19, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key28 =
    Lib_IntVector_Intrinsics_vec128_xor(key110,
      Lib_IntVector_Intrinsics_vec128_shift_left(key110, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key38 =
    Lib_IntVector_Intrinsics_vec128_xor(key28,
      Lib_IntVector_Intrinsics_vec128_shift_left(key28, (uint32_t)32U));
  next9[0U] = Lib_IntVector_Intrinsics_vec128_xor(next9[0U], key38);
  uint8_t nb[16U] = { 0U };
  memcpy(nb, n, (uint32_t)12U * sizeof (uint8_t));
  n10[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(nb);
  uint32_t blocks64 = len / (uint32_t)64U;
  for (uint32_t i = (uint32_t)0U; i < blocks64; i++)
  {
    uint32_t ctr = c + i * (uint32_t)4U;
    uint8_t *ib = inp + i * (uint32_t)64U;
    uint8_t *ob = out + i * (uint32_t)64U;
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n1 = ctx;
    uint32_t counter0 = htobe32(ctr);
    uint32_t counter1 = htobe32(ctr + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n1[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen0 = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen0;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen0;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i0,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i0 * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v00 = Lib_IntVector_Intrinsics_vec128_load128_le(ib);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v00, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)48U, v31);
  }
  uint32_t rem = len % (uint32_t)64U;
  uint8_t last[64U] = { 0U };
  if (rem > (uint32_t)0U)
  {
    uint32_t ctr = c + blocks64 * (uint32_t)4U;
    uint8_t *ib = inp + blocks64 * (uint32_t)64U;
    uint8_t *ob = out + blocks64 * (uint32_t)64U;
    memcpy(last, ib, rem * sizeof (uint8_t));
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n1 = ctx;
    uint32_t counter0 = htobe32(ctr);
    uint32_t counter1 = htobe32(ctr + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n1[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen0 = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen0;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen0;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v00 = Lib_IntVector_Intrinsics_vec128_load128_le(last);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v00, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(last, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)48U, v31);
    memcpy(ob, last, rem * sizeof (uint8_t));
  }
}

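/* CTR-mode decryption. CTR is its own inverse, so the body is identical to
   Hacl_AES_128_CTR32_NI_aes128_ctr_encrypt: regenerating the keystream and
   XORing it into the ciphertext recovers the plaintext. */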
inline void
Hacl_AES_128_CTR32_NI_aes128_ctr_decrypt(
  uint32_t len,
  uint8_t *out,
  uint8_t *inp,
  uint8_t *k,
  uint8_t *n,
  uint32_t c
)
{
  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[12U] KRML_POST_ALIGN(16) = { 0U };
  Lib_IntVector_Intrinsics_vec128 *kex0 = ctx + (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *n10 = ctx;
  uint32_t klen = (uint32_t)1U;
  Lib_IntVector_Intrinsics_vec128 *uu____0 = kex0;
  uu____0[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(k);
  Lib_IntVector_Intrinsics_vec128 *prev = kex0;
  Lib_IntVector_Intrinsics_vec128 *next = kex0 + klen;
  Lib_IntVector_Intrinsics_vec128
  v0 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev[0U], (uint8_t)0x01U);
  next[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v0,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key = prev[0U];
  Lib_IntVector_Intrinsics_vec128
  key1 =
    Lib_IntVector_Intrinsics_vec128_xor(key,
      Lib_IntVector_Intrinsics_vec128_shift_left(key, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key2 =
    Lib_IntVector_Intrinsics_vec128_xor(key1,
      Lib_IntVector_Intrinsics_vec128_shift_left(key1, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key3 =
    Lib_IntVector_Intrinsics_vec128_xor(key2,
      Lib_IntVector_Intrinsics_vec128_shift_left(key2, (uint32_t)32U));
  next[0U] = Lib_IntVector_Intrinsics_vec128_xor(next[0U], key3);
  Lib_IntVector_Intrinsics_vec128 *prev1 = kex0 + klen;
  Lib_IntVector_Intrinsics_vec128 *next1 = kex0 + (uint32_t)2U * klen;
  Lib_IntVector_Intrinsics_vec128
  v4 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev1[0U], (uint8_t)0x02U);
  next1[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v4,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key0 = prev1[0U];
  Lib_IntVector_Intrinsics_vec128
  key10 =
    Lib_IntVector_Intrinsics_vec128_xor(key0,
      Lib_IntVector_Intrinsics_vec128_shift_left(key0, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key20 =
    Lib_IntVector_Intrinsics_vec128_xor(key10,
      Lib_IntVector_Intrinsics_vec128_shift_left(key10, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key30 =
    Lib_IntVector_Intrinsics_vec128_xor(key20,
      Lib_IntVector_Intrinsics_vec128_shift_left(key20, (uint32_t)32U));
  next1[0U] = Lib_IntVector_Intrinsics_vec128_xor(next1[0U], key30);
  Lib_IntVector_Intrinsics_vec128 *prev2 = kex0 + klen * (uint32_t)2U;
  Lib_IntVector_Intrinsics_vec128 *next2 = kex0 + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128
  v5 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev2[0U], (uint8_t)0x04U);
  next2[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v5,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key4 = prev2[0U];
  Lib_IntVector_Intrinsics_vec128
  key11 =
    Lib_IntVector_Intrinsics_vec128_xor(key4,
      Lib_IntVector_Intrinsics_vec128_shift_left(key4, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key21 =
    Lib_IntVector_Intrinsics_vec128_xor(key11,
      Lib_IntVector_Intrinsics_vec128_shift_left(key11, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key31 =
    Lib_IntVector_Intrinsics_vec128_xor(key21,
      Lib_IntVector_Intrinsics_vec128_shift_left(key21, (uint32_t)32U));
  next2[0U] = Lib_IntVector_Intrinsics_vec128_xor(next2[0U], key31);
  Lib_IntVector_Intrinsics_vec128 *prev3 = kex0 + klen * (uint32_t)3U;
  Lib_IntVector_Intrinsics_vec128 *next3 = kex0 + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128
  v6 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev3[0U], (uint8_t)0x08U);
  next3[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v6,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key5 = prev3[0U];
  Lib_IntVector_Intrinsics_vec128
  key12 =
    Lib_IntVector_Intrinsics_vec128_xor(key5,
      Lib_IntVector_Intrinsics_vec128_shift_left(key5, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key22 =
    Lib_IntVector_Intrinsics_vec128_xor(key12,
      Lib_IntVector_Intrinsics_vec128_shift_left(key12, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key32 =
    Lib_IntVector_Intrinsics_vec128_xor(key22,
      Lib_IntVector_Intrinsics_vec128_shift_left(key22, (uint32_t)32U));
  next3[0U] = Lib_IntVector_Intrinsics_vec128_xor(next3[0U], key32);
  Lib_IntVector_Intrinsics_vec128 *prev4 = kex0 + klen * (uint32_t)4U;
  Lib_IntVector_Intrinsics_vec128 *next4 = kex0 + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128
  v7 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev4[0U], (uint8_t)0x10U);
  next4[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v7,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key6 = prev4[0U];
  Lib_IntVector_Intrinsics_vec128
  key13 =
    Lib_IntVector_Intrinsics_vec128_xor(key6,
      Lib_IntVector_Intrinsics_vec128_shift_left(key6, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key23 =
    Lib_IntVector_Intrinsics_vec128_xor(key13,
      Lib_IntVector_Intrinsics_vec128_shift_left(key13, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key33 =
    Lib_IntVector_Intrinsics_vec128_xor(key23,
      Lib_IntVector_Intrinsics_vec128_shift_left(key23, (uint32_t)32U));
  next4[0U] = Lib_IntVector_Intrinsics_vec128_xor(next4[0U], key33);
  Lib_IntVector_Intrinsics_vec128 *prev5 = kex0 + klen * (uint32_t)5U;
  Lib_IntVector_Intrinsics_vec128 *next5 = kex0 + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128
  v8 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev5[0U], (uint8_t)0x20U);
  next5[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v8,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key7 = prev5[0U];
  Lib_IntVector_Intrinsics_vec128
  key14 =
    Lib_IntVector_Intrinsics_vec128_xor(key7,
      Lib_IntVector_Intrinsics_vec128_shift_left(key7, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key24 =
    Lib_IntVector_Intrinsics_vec128_xor(key14,
      Lib_IntVector_Intrinsics_vec128_shift_left(key14, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key34 =
    Lib_IntVector_Intrinsics_vec128_xor(key24,
      Lib_IntVector_Intrinsics_vec128_shift_left(key24, (uint32_t)32U));
  next5[0U] = Lib_IntVector_Intrinsics_vec128_xor(next5[0U], key34);
  Lib_IntVector_Intrinsics_vec128 *prev6 = kex0 + klen * (uint32_t)6U;
  Lib_IntVector_Intrinsics_vec128 *next6 = kex0 + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128
  v9 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev6[0U], (uint8_t)0x40U);
  next6[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v9,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key8 = prev6[0U];
  Lib_IntVector_Intrinsics_vec128
  key15 =
    Lib_IntVector_Intrinsics_vec128_xor(key8,
      Lib_IntVector_Intrinsics_vec128_shift_left(key8, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key25 =
    Lib_IntVector_Intrinsics_vec128_xor(key15,
      Lib_IntVector_Intrinsics_vec128_shift_left(key15, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key35 =
    Lib_IntVector_Intrinsics_vec128_xor(key25,
      Lib_IntVector_Intrinsics_vec128_shift_left(key25, (uint32_t)32U));
  next6[0U] = Lib_IntVector_Intrinsics_vec128_xor(next6[0U], key35);
  Lib_IntVector_Intrinsics_vec128 *prev7 = kex0 + klen * (uint32_t)7U;
  Lib_IntVector_Intrinsics_vec128 *next7 = kex0 + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128
  v10 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev7[0U], (uint8_t)0x80U);
  next7[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v10,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key9 = prev7[0U];
  Lib_IntVector_Intrinsics_vec128
  key16 =
    Lib_IntVector_Intrinsics_vec128_xor(key9,
      Lib_IntVector_Intrinsics_vec128_shift_left(key9, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key26 =
    Lib_IntVector_Intrinsics_vec128_xor(key16,
      Lib_IntVector_Intrinsics_vec128_shift_left(key16, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key36 =
    Lib_IntVector_Intrinsics_vec128_xor(key26,
      Lib_IntVector_Intrinsics_vec128_shift_left(key26, (uint32_t)32U));
  next7[0U] = Lib_IntVector_Intrinsics_vec128_xor(next7[0U], key36);
  Lib_IntVector_Intrinsics_vec128 *prev8 = kex0 + klen * (uint32_t)8U;
  Lib_IntVector_Intrinsics_vec128 *next8 = kex0 + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128
  v12 = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev8[0U], (uint8_t)0x1bU);
  next8[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v12,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key17 = prev8[0U];
  Lib_IntVector_Intrinsics_vec128
  key18 =
    Lib_IntVector_Intrinsics_vec128_xor(key17,
      Lib_IntVector_Intrinsics_vec128_shift_left(key17, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key27 =
    Lib_IntVector_Intrinsics_vec128_xor(key18,
      Lib_IntVector_Intrinsics_vec128_shift_left(key18, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key37 =
    Lib_IntVector_Intrinsics_vec128_xor(key27,
      Lib_IntVector_Intrinsics_vec128_shift_left(key27, (uint32_t)32U));
  next8[0U] = Lib_IntVector_Intrinsics_vec128_xor(next8[0U], key37);
  Lib_IntVector_Intrinsics_vec128 *prev9 = kex0 + klen * (uint32_t)9U;
  Lib_IntVector_Intrinsics_vec128 *next9 = kex0 + klen * (uint32_t)10U;
  Lib_IntVector_Intrinsics_vec128
  v = Lib_IntVector_Intrinsics_ni_aes_keygen_assist(prev9[0U], (uint8_t)0x36U);
  next9[0U] =
    Lib_IntVector_Intrinsics_vec128_shuffle32(v,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U,
      (uint32_t)3U);
  Lib_IntVector_Intrinsics_vec128 key19 = prev9[0U];
  Lib_IntVector_Intrinsics_vec128
  key110 =
    Lib_IntVector_Intrinsics_vec128_xor(key19,
      Lib_IntVector_Intrinsics_vec128_shift_left(key19, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key28 =
    Lib_IntVector_Intrinsics_vec128_xor(key110,
      Lib_IntVector_Intrinsics_vec128_shift_left(key110, (uint32_t)32U));
  Lib_IntVector_Intrinsics_vec128
  key38 =
    Lib_IntVector_Intrinsics_vec128_xor(key28,
      Lib_IntVector_Intrinsics_vec128_shift_left(key28, (uint32_t)32U));
  next9[0U] = Lib_IntVector_Intrinsics_vec128_xor(next9[0U], key38);
  uint8_t nb[16U] = { 0U };
  memcpy(nb, n, (uint32_t)12U * sizeof (uint8_t));
  n10[0U] = Lib_IntVector_Intrinsics_vec128_load128_le(nb);
  uint32_t blocks64 = len / (uint32_t)64U;
  for (uint32_t i = (uint32_t)0U; i < blocks64; i++)
  {
    uint32_t ctr = c + i * (uint32_t)4U;
    uint8_t *ib = inp + i * (uint32_t)64U;
    uint8_t *ob = out + i * (uint32_t)64U;
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n1 = ctx;
    uint32_t counter0 = htobe32(ctr);
    uint32_t counter1 = htobe32(ctr + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n1[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen0 = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen0;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen0;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i0,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i0 * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v00 = Lib_IntVector_Intrinsics_vec128_load128_le(ib);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v00, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)48U, v31);
  }
  uint32_t rem = len % (uint32_t)64U;
  uint8_t last[64U] = { 0U };
  if (rem > (uint32_t)0U)
  {
    uint32_t ctr = c + blocks64 * (uint32_t)4U;
    uint8_t *ib = inp + blocks64 * (uint32_t)64U;
    uint8_t *ob = out + blocks64 * (uint32_t)64U;
    memcpy(last, ib, rem * sizeof (uint8_t));
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n1 = ctx;
    uint32_t counter0 = htobe32(ctr);
    uint32_t counter1 = htobe32(ctr + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n1[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen0 = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen0;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen0;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v00 = Lib_IntVector_Intrinsics_vec128_load128_le(last);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v00, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(last, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)48U, v31);
    memcpy(ob, last, rem * sizeof (uint8_t));
  }
}

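For orientation, a minimal caller of this API might look like the sketch below. It is an illustrative assumption, not part of the covered file: it presumes a build of hacl-packages that exposes Hacl_AES_128_CTR32_NI.h and a CPU with the AES-NI/vec128 features this file targets, and the key, nonce, and length values are throwaway placeholders.

#include <stdint.h>
#include <string.h>
#include "Hacl_AES_128_CTR32_NI.h"

int main(void)
{
  uint8_t key[16U] = { 0U };   /* illustrative all-zero key; use a real key in practice */
  uint8_t nonce[12U] = { 0U }; /* 12-byte nonce; must not be reused under the same key */
  uint8_t msg[80U] = { 0U };   /* 80 bytes: exercises the partial final block path */
  uint8_t ct[80U] = { 0U };
  uint8_t pt[80U] = { 0U };
  /* Encrypt starting at counter 0, then decrypt with the same parameters;
     in CTR mode the two calls are exact inverses. */
  Hacl_AES_128_CTR32_NI_aes128_ctr_encrypt((uint32_t)80U, ct, msg, key, nonce, (uint32_t)0U);
  Hacl_AES_128_CTR32_NI_aes128_ctr_decrypt((uint32_t)80U, pt, ct, key, nonce, (uint32_t)0U);
  return memcmp(pt, msg, 80U) == 0 ? 0 : 1; /* 0 on round-trip success */
}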