
cryspen / hacl-packages, build 5808703668 (pending completion)

Pull Request #418: Add support for Hacl_AES_128_GCM_NI and Hacl_AES_128_GCM_M32
Merge 4abdd0203 into 1575f26e8 (merge commit created via GitHub web-flow)

7433 of 7433 new or added lines in 12 files covered (100.0%)

31975 of 62256 relevant lines covered (51.36%)

1238863.46 hits per line
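As context for the source file below, here is a minimal usage sketch of the one-shot API this pull request adds. It is not taken from the PR itself: the context size of 18 vec128 words is inferred from the indices used in this file (AES-CTR32 state at [0..11], GF128 state at [12..16], precomputed tag mask at [17]) and should be checked against Hacl_AES_128_GCM_NI.h; the vec128 type and the KRML alignment macros are assumed to be brought in by that header.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #include "Hacl_AES_128_GCM_NI.h"

  int main(void)
  {
    /* Assumed context size: 18 vec128 words (see note above). */
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[18U] KRML_POST_ALIGN(16) = { 0U };
    uint8_t key[16U] = { 0U };           /* all-zero key/IV/AAD, illustration only */
    uint8_t iv[12U] = { 0U };
    uint8_t aad[13U] = { 0U };
    uint8_t msg[32U] = { 0U };
    uint8_t cipher[32U + 16U] = { 0U };  /* ciphertext followed by the 16-byte tag */
    uint8_t plain[32U] = { 0U };

    Hacl_AES_128_GCM_NI_aes128_gcm_init(ctx, key);
    Hacl_AES_128_GCM_NI_aes128_gcm_encrypt(ctx, 32U, cipher, msg, 13U, aad, 12U, iv);
    bool ok = Hacl_AES_128_GCM_NI_aes128_gcm_decrypt(ctx, 32U, plain, cipher, 13U, aad, 12U, iv);
    printf("tag verified: %s\n", ok ? "yes" : "no");
    return 0;
  }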

Source File: /src/Hacl_AES_128_GCM_NI.c (0.0% covered; no executable line in this file is hit)
/* MIT License
 *
 * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
 * Copyright (c) 2022-2023 HACL* Contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "Hacl_AES_128_GCM_NI.h"

/* Initialize the AES-128-GCM context: expand the key into the AES-CTR32 state
   (with a zero nonce), derive the GHASH key by encrypting the all-zero block,
   and initialize the GF128 state that lives at ctx + 12. */
void Hacl_AES_128_GCM_NI_aes128_gcm_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
{
  uint8_t gcm_key[16U] = { 0U };
  uint8_t nonce0[12U] = { 0U };
  Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
  Lib_IntVector_Intrinsics_vec128 *gcm_ctx = ctx + (uint32_t)12U;
  Hacl_AES_128_CTR32_NI_aes128_init(aes_ctx, key, nonce0);
  Hacl_AES_128_CTR32_NI_aes128_key_block(gcm_key, aes_ctx, (uint32_t)0U);
  Hacl_Gf128_NI_gcm_init(gcm_ctx, gcm_key);
}

/* One-shot AES-128-GCM encryption: derives the pre-counter block from the IV,
   encrypts `len` bytes of `text` into `out` in CTR mode (four 128-bit blocks
   per iteration), then GHASHes `aad` and the ciphertext and writes the
   16-byte authentication tag at out + len. */
void
Hacl_AES_128_GCM_NI_aes128_gcm_encrypt(
  Lib_IntVector_Intrinsics_vec128 *ctx,
  uint32_t len,
  uint8_t *out,
  uint8_t *text,
  uint32_t aad_len,
  uint8_t *aad,
  uint32_t iv_len,
  uint8_t *iv
)
{
  uint32_t ctr;
  uint8_t tag_mix0[16U] = { 0U };
  uint8_t gcm_key[16U] = { 0U };
  uint8_t tag_iv[16U] = { 0U };
  uint8_t size_iv[16U] = { 0U };
  uint8_t tag_mix1[16U] = { 0U };
  if (iv_len == (uint32_t)12U)
  {
    Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
    Hacl_AES_128_CTR32_NI_aes128_set_nonce(aes_ctx, iv);
    Hacl_AES_128_CTR32_NI_aes128_key_block(tag_mix0, aes_ctx, (uint32_t)1U);
    ctx[17U] = Lib_IntVector_Intrinsics_vec128_load128_le(tag_mix0);
    ctr = (uint32_t)2U;
  }
  else
  {
    Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
    Lib_IntVector_Intrinsics_vec128 *gcm_ctx = ctx + (uint32_t)12U;
    Lib_IntVector_Intrinsics_vec128_store_be(gcm_key, gcm_ctx[4U]);
    Hacl_Gf128_NI_ghash(tag_iv, iv_len, iv, gcm_key);
    store64_be(size_iv + (uint32_t)8U, (uint64_t)(iv_len * (uint32_t)8U));
    KRML_MAYBE_FOR16(i,
      (uint32_t)0U,
      (uint32_t)16U,
      (uint32_t)1U,
      size_iv[i] = tag_iv[i] ^ size_iv[i];);
    Hacl_Gf128_NI_ghash(tag_iv, (uint32_t)16U, size_iv, gcm_key);
    Hacl_AES_128_CTR32_NI_aes128_set_nonce(aes_ctx, tag_iv);
    uint32_t u = load32_be(tag_iv + (uint32_t)12U);
    uint32_t ctr0 = u;
    Hacl_AES_128_CTR32_NI_aes128_key_block(tag_mix1, aes_ctx, ctr0);
    ctx[17U] = Lib_IntVector_Intrinsics_vec128_load128_le(tag_mix1);
    ctr = ctr0 + (uint32_t)1U;
  }
  uint8_t *cip = out;
  Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
  uint32_t blocks64 = len / (uint32_t)64U;
  for (uint32_t i = (uint32_t)0U; i < blocks64; i++)
  {
    uint32_t ctr1 = ctr + i * (uint32_t)4U;
    uint8_t *ib = text + i * (uint32_t)64U;
    uint8_t *ob = cip + i * (uint32_t)64U;
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = aes_ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n = aes_ctx;
    uint32_t counter0 = htobe32(ctr1);
    uint32_t counter1 = htobe32(ctr1 + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr1 + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr1 + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i0,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i0 * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v0 = Lib_IntVector_Intrinsics_vec128_load128_le(ib);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v0, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)48U, v31);
  }
  uint32_t rem = len % (uint32_t)64U;
  uint8_t last[64U] = { 0U };
  if (rem > (uint32_t)0U)
  {
    uint32_t ctr1 = ctr + blocks64 * (uint32_t)4U;
    uint8_t *ib = text + blocks64 * (uint32_t)64U;
    uint8_t *ob = cip + blocks64 * (uint32_t)64U;
    memcpy(last, ib, rem * sizeof (uint8_t));
    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
    Lib_IntVector_Intrinsics_vec128 *kex = aes_ctx + (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *n = aes_ctx;
    uint32_t counter0 = htobe32(ctr1);
    uint32_t counter1 = htobe32(ctr1 + (uint32_t)1U);
    uint32_t counter2 = htobe32(ctr1 + (uint32_t)2U);
    uint32_t counter3 = htobe32(ctr1 + (uint32_t)3U);
    Lib_IntVector_Intrinsics_vec128 nonce0 = n[0U];
    st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
    st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
    st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
    st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
    uint32_t klen = (uint32_t)1U;
    Lib_IntVector_Intrinsics_vec128 *k0 = kex;
    Lib_IntVector_Intrinsics_vec128 *kr = kex + klen;
    Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen;
    st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
    st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
    st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
    st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
    KRML_MAYBE_FOR9(i,
      (uint32_t)0U,
      (uint32_t)9U,
      (uint32_t)1U,
      Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i * (uint32_t)1U;
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
    st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
    st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
    st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
    st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
    Lib_IntVector_Intrinsics_vec128 v0 = Lib_IntVector_Intrinsics_vec128_load128_le(last);
    Lib_IntVector_Intrinsics_vec128
    v1 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)16U);
    Lib_IntVector_Intrinsics_vec128
    v2 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)32U);
    Lib_IntVector_Intrinsics_vec128
    v3 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)48U);
    Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v0, st[0U]);
    Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
    Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
    Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
    Lib_IntVector_Intrinsics_vec128_store128_le(last, v01);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)16U, v11);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)32U, v21);
    Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)48U, v31);
    memcpy(ob, last, rem * sizeof (uint8_t));
  }
  Lib_IntVector_Intrinsics_vec128 *gcm_ctx = ctx + (uint32_t)12U;
  Lib_IntVector_Intrinsics_vec128 tag_mix = ctx[17U];
  gcm_ctx[0U] = Lib_IntVector_Intrinsics_vec128_zero;
  Hacl_Gf128_NI_gcm_update_padded(gcm_ctx, aad_len, aad);
  Hacl_Gf128_NI_gcm_update_padded(gcm_ctx, len, cip);
  uint8_t tmp[16U] = { 0U };
  store64_be(tmp, (uint64_t)(aad_len * (uint32_t)8U));
  store64_be(tmp + (uint32_t)8U, (uint64_t)(len * (uint32_t)8U));
  Hacl_Gf128_NI_gcm_update_blocks(gcm_ctx, (uint32_t)16U, tmp);
  Hacl_Gf128_NI_gcm_emit(tmp, gcm_ctx);
  Lib_IntVector_Intrinsics_vec128 tmp_vec = Lib_IntVector_Intrinsics_vec128_load128_le(tmp);
  Lib_IntVector_Intrinsics_vec128
  tmp_vec1 = Lib_IntVector_Intrinsics_vec128_xor(tmp_vec, tag_mix);
  Lib_IntVector_Intrinsics_vec128_store128_le(out + len, tmp_vec1);
}
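/* Note (illustration only, not part of this file): each iteration of the main
   loop above derives four keystream blocks from the same nonce by placing the
   32-bit big-endian counters ctr1, ctr1+1, ctr1+2, ctr1+3 in the last 32-bit
   lane of the 128-bit block. A scalar equivalent of one such counter block,
   assuming a 12-byte nonce `n` and counter value `c`, would be:

     uint8_t block[16U];
     memcpy(block, n, 12U);        // bytes 0..11: nonce
     store32_be(block + 12U, c);   // bytes 12..15: big-endian counter

   which, on a little-endian target, matches
   Lib_IntVector_Intrinsics_vec128_insert32(nonce0, htobe32(c), (uint32_t)3U)
   applied to the nonce block held in the context. */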

/* One-shot AES-128-GCM decryption: recomputes the GHASH tag over `aad` and the
   ciphertext, compares it against the 16-byte tag stored at cipher + len
   without an early exit, and decrypts into `out` only if the tags match.
   Returns true iff the tag verified. */
bool
Hacl_AES_128_GCM_NI_aes128_gcm_decrypt(
  Lib_IntVector_Intrinsics_vec128 *ctx,
  uint32_t len,
  uint8_t *out,
  uint8_t *cipher,
  uint32_t aad_len,
  uint8_t *aad,
  uint32_t iv_len,
  uint8_t *iv
)
{
  uint8_t scratch[18U] = { 0U };
  uint8_t *text = scratch;
  uint8_t *result = scratch + (uint32_t)17U;
  uint8_t *ciphertext = cipher;
  uint8_t *tag = cipher + len;
  uint32_t ctr;
  uint8_t tag_mix0[16U] = { 0U };
  uint8_t gcm_key[16U] = { 0U };
  uint8_t tag_iv[16U] = { 0U };
  uint8_t size_iv[16U] = { 0U };
  uint8_t tag_mix1[16U] = { 0U };
  if (iv_len == (uint32_t)12U)
  {
    Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
    Hacl_AES_128_CTR32_NI_aes128_set_nonce(aes_ctx, iv);
    Hacl_AES_128_CTR32_NI_aes128_key_block(tag_mix0, aes_ctx, (uint32_t)1U);
    ctx[17U] = Lib_IntVector_Intrinsics_vec128_load128_le(tag_mix0);
    ctr = (uint32_t)2U;
  }
  else
  {
    Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
    Lib_IntVector_Intrinsics_vec128 *gcm_ctx = ctx + (uint32_t)12U;
    Lib_IntVector_Intrinsics_vec128_store_be(gcm_key, gcm_ctx[4U]);
    Hacl_Gf128_NI_ghash(tag_iv, iv_len, iv, gcm_key);
    store64_be(size_iv + (uint32_t)8U, (uint64_t)(iv_len * (uint32_t)8U));
    KRML_MAYBE_FOR16(i,
      (uint32_t)0U,
      (uint32_t)16U,
      (uint32_t)1U,
      size_iv[i] = tag_iv[i] ^ size_iv[i];);
    Hacl_Gf128_NI_ghash(tag_iv, (uint32_t)16U, size_iv, gcm_key);
    Hacl_AES_128_CTR32_NI_aes128_set_nonce(aes_ctx, tag_iv);
    uint32_t u = load32_be(tag_iv + (uint32_t)12U);
    uint32_t ctr0 = u;
    Hacl_AES_128_CTR32_NI_aes128_key_block(tag_mix1, aes_ctx, ctr0);
    ctx[17U] = Lib_IntVector_Intrinsics_vec128_load128_le(tag_mix1);
    ctr = ctr0 + (uint32_t)1U;
  }
  Lib_IntVector_Intrinsics_vec128 *aes_ctx = ctx;
  Lib_IntVector_Intrinsics_vec128 *gcm_ctx = ctx + (uint32_t)12U;
  Lib_IntVector_Intrinsics_vec128 tag_mix = ctx[17U];
  gcm_ctx[0U] = Lib_IntVector_Intrinsics_vec128_zero;
  Hacl_Gf128_NI_gcm_update_padded(gcm_ctx, aad_len, aad);
  Hacl_Gf128_NI_gcm_update_padded(gcm_ctx, len, ciphertext);
  store64_be(text, (uint64_t)(aad_len * (uint32_t)8U));
  store64_be(text + (uint32_t)8U, (uint64_t)(len * (uint32_t)8U));
  Hacl_Gf128_NI_gcm_update_blocks(gcm_ctx, (uint32_t)16U, text);
  Hacl_Gf128_NI_gcm_emit(text, gcm_ctx);
  Lib_IntVector_Intrinsics_vec128 text_vec = Lib_IntVector_Intrinsics_vec128_load128_le(text);
  Lib_IntVector_Intrinsics_vec128
  text_vec1 = Lib_IntVector_Intrinsics_vec128_xor(text_vec, tag_mix);
  Lib_IntVector_Intrinsics_vec128_store128_le(text, text_vec1);
  KRML_MAYBE_FOR16(i,
    (uint32_t)0U,
    (uint32_t)16U,
    (uint32_t)1U,
    result[0U] = result[0U] | (text[i] ^ tag[i]););
  uint8_t res8 = result[0U];
  if (res8 == (uint8_t)0U)
  {
    uint32_t blocks64 = len / (uint32_t)64U;
    for (uint32_t i = (uint32_t)0U; i < blocks64; i++)
    {
      uint32_t ctr1 = ctr + i * (uint32_t)4U;
      uint8_t *ib = ciphertext + i * (uint32_t)64U;
      uint8_t *ob = out + i * (uint32_t)64U;
      KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
      Lib_IntVector_Intrinsics_vec128 *kex = aes_ctx + (uint32_t)1U;
      Lib_IntVector_Intrinsics_vec128 *n = aes_ctx;
      uint32_t counter0 = htobe32(ctr1);
      uint32_t counter1 = htobe32(ctr1 + (uint32_t)1U);
      uint32_t counter2 = htobe32(ctr1 + (uint32_t)2U);
      uint32_t counter3 = htobe32(ctr1 + (uint32_t)3U);
      Lib_IntVector_Intrinsics_vec128 nonce0 = n[0U];
      st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
      st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
      st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
      st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
      uint32_t klen = (uint32_t)1U;
      Lib_IntVector_Intrinsics_vec128 *k0 = kex;
      Lib_IntVector_Intrinsics_vec128 *kr = kex + klen;
      Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen;
      st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
      st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
      st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
      st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
      KRML_MAYBE_FOR9(i0,
        (uint32_t)0U,
        (uint32_t)9U,
        (uint32_t)1U,
        Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i0 * (uint32_t)1U;
        st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
        st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
        st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
        st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
      Lib_IntVector_Intrinsics_vec128 v0 = Lib_IntVector_Intrinsics_vec128_load128_le(ib);
      Lib_IntVector_Intrinsics_vec128
      v1 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)16U);
      Lib_IntVector_Intrinsics_vec128
      v2 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)32U);
      Lib_IntVector_Intrinsics_vec128
      v3 = Lib_IntVector_Intrinsics_vec128_load128_le(ib + (uint32_t)48U);
      Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v0, st[0U]);
      Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
      Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
      Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
      Lib_IntVector_Intrinsics_vec128_store128_le(ob, v01);
      Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)16U, v11);
      Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)32U, v21);
      Lib_IntVector_Intrinsics_vec128_store128_le(ob + (uint32_t)48U, v31);
    }
    uint32_t rem = len % (uint32_t)64U;
    uint8_t last[64U] = { 0U };
    if (rem > (uint32_t)0U)
    {
      uint32_t ctr1 = ctr + blocks64 * (uint32_t)4U;
      uint8_t *ib = ciphertext + blocks64 * (uint32_t)64U;
      uint8_t *ob = out + blocks64 * (uint32_t)64U;
      memcpy(last, ib, rem * sizeof (uint8_t));
      KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[4U] KRML_POST_ALIGN(16) = { 0U };
      Lib_IntVector_Intrinsics_vec128 *kex = aes_ctx + (uint32_t)1U;
      Lib_IntVector_Intrinsics_vec128 *n = aes_ctx;
      uint32_t counter0 = htobe32(ctr1);
      uint32_t counter1 = htobe32(ctr1 + (uint32_t)1U);
      uint32_t counter2 = htobe32(ctr1 + (uint32_t)2U);
      uint32_t counter3 = htobe32(ctr1 + (uint32_t)3U);
      Lib_IntVector_Intrinsics_vec128 nonce0 = n[0U];
      st[0U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter0, (uint32_t)3U);
      st[1U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter1, (uint32_t)3U);
      st[2U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter2, (uint32_t)3U);
      st[3U] = Lib_IntVector_Intrinsics_vec128_insert32(nonce0, counter3, (uint32_t)3U);
      uint32_t klen = (uint32_t)1U;
      Lib_IntVector_Intrinsics_vec128 *k0 = kex;
      Lib_IntVector_Intrinsics_vec128 *kr = kex + klen;
      Lib_IntVector_Intrinsics_vec128 *kn = kex + (uint32_t)10U * klen;
      st[0U] = Lib_IntVector_Intrinsics_vec128_xor(st[0U], k0[0U]);
      st[1U] = Lib_IntVector_Intrinsics_vec128_xor(st[1U], k0[0U]);
      st[2U] = Lib_IntVector_Intrinsics_vec128_xor(st[2U], k0[0U]);
      st[3U] = Lib_IntVector_Intrinsics_vec128_xor(st[3U], k0[0U]);
      KRML_MAYBE_FOR9(i,
        (uint32_t)0U,
        (uint32_t)9U,
        (uint32_t)1U,
        Lib_IntVector_Intrinsics_vec128 *sub_key = kr + i * (uint32_t)1U;
        st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[0U], sub_key[0U]);
        st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[1U], sub_key[0U]);
        st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[2U], sub_key[0U]);
        st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc(st[3U], sub_key[0U]););
      st[0U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[0U], kn[0U]);
      st[1U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[1U], kn[0U]);
      st[2U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[2U], kn[0U]);
      st[3U] = Lib_IntVector_Intrinsics_ni_aes_enc_last(st[3U], kn[0U]);
      Lib_IntVector_Intrinsics_vec128 v0 = Lib_IntVector_Intrinsics_vec128_load128_le(last);
      Lib_IntVector_Intrinsics_vec128
      v1 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)16U);
      Lib_IntVector_Intrinsics_vec128
      v2 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)32U);
      Lib_IntVector_Intrinsics_vec128
      v3 = Lib_IntVector_Intrinsics_vec128_load128_le(last + (uint32_t)48U);
      Lib_IntVector_Intrinsics_vec128 v01 = Lib_IntVector_Intrinsics_vec128_xor(v0, st[0U]);
      Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_xor(v1, st[1U]);
      Lib_IntVector_Intrinsics_vec128 v21 = Lib_IntVector_Intrinsics_vec128_xor(v2, st[2U]);
      Lib_IntVector_Intrinsics_vec128 v31 = Lib_IntVector_Intrinsics_vec128_xor(v3, st[3U]);
      Lib_IntVector_Intrinsics_vec128_store128_le(last, v01);
      Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)16U, v11);
      Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)32U, v21);
      Lib_IntVector_Intrinsics_vec128_store128_le(last + (uint32_t)48U, v31);
      memcpy(ob, last, rem * sizeof (uint8_t));
    }
    return true;
  }
  return false;
}
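The tag comparison in aes128_gcm_decrypt folds the XOR of the computed and received tags into a single accumulator byte and branches only once on the final value, so every byte is inspected regardless of where a mismatch occurs. A standalone sketch of that pattern, with a hypothetical helper name that is not part of the HACL* API:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Constant-flow 16-byte tag comparison: same idea as the KRML_MAYBE_FOR16
     accumulation into result[0U] above. */
  static bool tag_eq_16(const uint8_t *a, const uint8_t *b)
  {
    uint8_t acc = (uint8_t)0U;
    for (size_t i = (size_t)0U; i < (size_t)16U; i++)
    {
      acc = (uint8_t)(acc | (uint8_t)(a[i] ^ b[i]));
    }
    return acc == (uint8_t)0U;
  }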
