aes_gcm.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2019 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <crypto_native/crypto_native.h>
22 #include <crypto_native/aes.h>
23 #include <crypto_native/ghash.h>
24 
25 #if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
26 #pragma GCC optimize ("O3")
27 #endif
28 
29 #ifdef __VAES__
30 #define NUM_HI 32
31 #else
32 #define NUM_HI 8
33 #endif
34 
35 typedef struct
36 {
37  /* pre-calculated hash key values */
38  const u8x16 Hi[NUM_HI];
39  /* extracted AES key */
40  const u8x16 Ke[15];
41 #ifdef __VAES__
42  const u8x64 Ke4[15];
43 #endif
44 } aes_gcm_key_data_t;
45 
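/* kd->Hi holds precomputed powers of the GHASH hash key H (the last entry
   is H itself, lower indices hold successively higher powers), so the
   batched GHASH code below can multiply each block by a different power
   and perform a single reduction.  Ke is the expanded AES key schedule;
   in VAES builds Ke4 carries every 128-bit round key broadcast into a
   512-bit register. */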
46 typedef struct
47 {
48  u32 counter;
49  union
50  {
51  u32x4 Y;
52  u32x16 Y4;
53  };
54 } aes_gcm_counter_t;
55 
56 typedef enum
57 {
58  AES_GCM_F_WITH_GHASH = (1 << 0),
59  AES_GCM_F_LAST_ROUND = (1 << 1),
60  AES_GCM_F_ENCRYPT = (1 << 2),
61  AES_GCM_F_DECRYPT = (1 << 3),
62 } aes_gcm_flags_t;
63 
64 static const u32x4 ctr_inv_1 = { 0, 0, 0, 1 << 24 };
65 
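/* The 32-bit block counter lives in the counter block in network byte
   order, so adding ctr_inv_1 (1 << 24 in the last lane) increments the
   big-endian counter without a byte swap.  This shortcut is only valid
   while the low counter byte does not wrap, hence the slower
   clib_host_to_net_u32 () path in the first-round helpers below. */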
66 #ifndef __VAES__
67 static_always_inline void
68 aes_gcm_enc_first_round (u8x16 * r, aes_gcm_counter_t * ctr, u8x16 k,
69  int n_blocks)
70 {
71  if (PREDICT_TRUE ((u8) ctr->counter < (256 - 2 * n_blocks)))
72  {
73  for (int i = 0; i < n_blocks; i++)
74  {
75  r[i] = k ^ (u8x16) ctr->Y;
76  ctr->Y += ctr_inv_1;
77  }
78  ctr->counter += n_blocks;
79  }
80  else
81  {
82  for (int i = 0; i < n_blocks; i++)
83  {
84  r[i] = k ^ (u8x16) ctr->Y;
85  ctr->counter++;
86  ctr->Y[3] = clib_host_to_net_u32 (ctr->counter + 1);
87  }
88  }
89 }
90 
91 static_always_inline void
92 aes_gcm_enc_round (u8x16 * r, u8x16 k, int n_blocks)
93 {
94  for (int i = 0; i < n_blocks; i++)
95  r[i] = aes_enc_round (r[i], k);
96 }
97 
98 static_always_inline void
99 aes_gcm_enc_last_round (u8x16 * r, u8x16 * d, u8x16 const *k,
100  int rounds, int n_blocks)
101 {
102 
103  /* additional rounds for AES-192 and AES-256 */
104  for (int i = 10; i < rounds; i++)
105  aes_gcm_enc_round (r, k[i], n_blocks);
106 
107  for (int i = 0; i < n_blocks; i++)
108  d[i] ^= aes_enc_last_round (r[i], k[rounds]);
109 }
110 #endif
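/* The helpers above run up to four independent AES-CTR pipelines (one
   16-byte block each) through the same round key, so the callers can
   interleave GHASH work between AES rounds and keep both the AES and the
   carry-less multiply units busy. */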
111 
112 static_always_inline u8x16
113 aes_gcm_ghash_blocks (u8x16 T, aes_gcm_key_data_t * kd,
114  u8x16u * in, int n_blocks)
115 {
116  ghash_data_t _gd, *gd = &_gd;
117  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - n_blocks;
118  ghash_mul_first (gd, u8x16_reflect (in[0]) ^ T, Hi[0]);
119  for (int i = 1; i < n_blocks; i++)
120  ghash_mul_next (gd, u8x16_reflect ((in[i])), Hi[i]);
121  ghash_reduce (gd);
122  ghash_reduce2 (gd);
123  return ghash_final (gd);
124 }
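/* aes_gcm_ghash_blocks () folds n_blocks of input into the running tag T
   with a single reduction: block i is multiplied by the matching power of
   H taken from kd->Hi, the partial products are accumulated, and the
   reduce/reduce2/final steps perform the modular reduction once. */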
125 
126 static_always_inline u8x16
127 aes_gcm_ghash (u8x16 T, aes_gcm_key_data_t * kd, u8x16u * in, u32 n_left)
128 {
129 
130  while (n_left >= 128)
131  {
132  T = aes_gcm_ghash_blocks (T, kd, in, 8);
133  n_left -= 128;
134  in += 8;
135  }
136 
137  if (n_left >= 64)
138  {
139  T = aes_gcm_ghash_blocks (T, kd, in, 4);
140  n_left -= 64;
141  in += 4;
142  }
143 
144  if (n_left >= 32)
145  {
146  T = aes_gcm_ghash_blocks (T, kd, in, 2);
147  n_left -= 32;
148  in += 2;
149  }
150 
151  if (n_left >= 16)
152  {
153  T = aes_gcm_ghash_blocks (T, kd, in, 1);
154  n_left -= 16;
155  in += 1;
156  }
157 
158  if (n_left)
159  {
160  u8x16 r = aes_load_partial (in, n_left);
161  T = ghash_mul (u8x16_reflect (r) ^ T, kd->Hi[NUM_HI - 1]);
162  }
163  return T;
164 }
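/* aes_gcm_ghash () is used for the AAD: it consumes data in 8/4/2/1 block
   batches and finishes a trailing partial block with a masked load, so no
   copy into a zero-padded buffer is needed. */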
165 
166 #ifndef __VAES__
167 static_always_inline u8x16
168 aes_gcm_calc (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
169  aes_gcm_counter_t * ctr, u8x16u * inv, u8x16u * outv,
170  int rounds, int n, int last_block_bytes, aes_gcm_flags_t f)
171 {
172  u8x16 r[n];
173  ghash_data_t _gd = { }, *gd = &_gd;
174  const u8x16 *rk = (u8x16 *) kd->Ke;
175  int ghash_blocks = (f & AES_GCM_F_ENCRYPT) ? 4 : n, gc = 1;
176  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - ghash_blocks;
177 
178  clib_prefetch_load (inv + 4);
179 
180  /* AES rounds 0 and 1 */
181  aes_gcm_enc_first_round (r, ctr, rk[0], n);
182  aes_gcm_enc_round (r, rk[1], n);
183 
184  /* load data - decrypt round */
185  if (f & AES_GCM_F_DECRYPT)
186  {
187  for (int i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
188  d[i] = inv[i];
189 
190  if (f & AES_GCM_F_LAST_ROUND)
191  d[n - 1] = aes_load_partial (inv + n - 1, last_block_bytes);
192  }
193 
194  /* GHASH multiply block 1 */
195  if (f & AES_GCM_F_WITH_GHASH)
196  ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);
197 
198  /* AES rounds 2 and 3 */
199  aes_gcm_enc_round (r, rk[2], n);
200  aes_gcm_enc_round (r, rk[3], n);
201 
202  /* GHASH multiply block 2 */
203  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
204  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);
205 
206  /* AES rounds 4 and 5 */
207  aes_gcm_enc_round (r, rk[4], n);
208  aes_gcm_enc_round (r, rk[5], n);
209 
210  /* GHASH multiply block 3 */
211  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
212  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);
213 
214  /* AES rounds 6 and 7 */
215  aes_gcm_enc_round (r, rk[6], n);
216  aes_gcm_enc_round (r, rk[7], n);
217 
218  /* GHASH multiply block 4 */
219  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
220  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);
221 
222  /* AES rounds 8 and 9 */
223  aes_gcm_enc_round (r, rk[8], n);
224  aes_gcm_enc_round (r, rk[9], n);
225 
226  /* GHASH reduce 1st step */
227  if (f & AES_GCM_F_WITH_GHASH)
228  ghash_reduce (gd);
229 
230  /* load data - encrypt round */
231  if (f & AES_GCM_F_ENCRYPT)
232  {
233  for (int i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
234  d[i] = inv[i];
235 
236  if (f & AES_GCM_F_LAST_ROUND)
237  d[n - 1] = aes_load_partial (inv + n - 1, last_block_bytes);
238  }
239 
240  /* GHASH reduce 2nd step */
241  if (f & AES_GCM_F_WITH_GHASH)
242  ghash_reduce2 (gd);
243 
244  /* AES last round(s) */
245  aes_gcm_enc_last_round (r, d, rk, rounds, n);
246 
247  /* store data */
248  for (int i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
249  outv[i] = d[i];
250 
251  if (f & AES_GCM_F_LAST_ROUND)
252  aes_store_partial (outv + n - 1, d[n - 1], last_block_bytes);
253 
254  /* GHASH final step */
255  if (f & AES_GCM_F_WITH_GHASH)
256  T = ghash_final (gd);
257 
258  return T;
259 }
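/* aes_gcm_calc () advances up to four counter blocks while folding four
   blocks into GHASH.  The flags pick the data flow: when decrypting, the
   ciphertext being hashed is the current input; when encrypting, it is the
   output of the previous call carried over in d[].  AES_GCM_F_LAST_ROUND
   switches the tail block to partial loads/stores of last_block_bytes. */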
260 
261 static_always_inline u8x16
262 aes_gcm_calc_double (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
263  aes_gcm_counter_t * ctr, u8x16u * inv, u8x16u * outv,
264  int rounds, aes_gcm_flags_t f)
265 {
266  u8x16 r[4];
267  ghash_data_t _gd, *gd = &_gd;
268  const u8x16 *rk = (u8x16 *) kd->Ke;
269  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - 8;
270 
271  /* AES rounds 0 and 1 */
272  aes_gcm_enc_first_round (r, ctr, rk[0], 4);
273  aes_gcm_enc_round (r, rk[1], 4);
274 
275  /* load 4 blocks of data - decrypt round */
276  if (f & AES_GCM_F_DECRYPT)
277  {
278  d[0] = inv[0];
279  d[1] = inv[1];
280  d[2] = inv[2];
281  d[3] = inv[3];
282  }
283 
284  /* GHASH multiply block 0 */
285  ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);
286 
287  /* AES rounds 2 and 3 */
288  aes_gcm_enc_round (r, rk[2], 4);
289  aes_gcm_enc_round (r, rk[3], 4);
290 
291  /* GHASH multiply block 1 */
292  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);
293 
294  /* AES rounds 4 and 5 */
295  aes_gcm_enc_round (r, rk[4], 4);
296  aes_gcm_enc_round (r, rk[5], 4);
297 
298  /* GHASH multiply block 2 */
299  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);
300 
301  /* AES rounds 6 and 7 */
302  aes_gcm_enc_round (r, rk[6], 4);
303  aes_gcm_enc_round (r, rk[7], 4);
304 
305  /* GHASH multiply block 3 */
306  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);
307 
308  /* AES rounds 8 and 9 */
309  aes_gcm_enc_round (r, rk[8], 4);
310  aes_gcm_enc_round (r, rk[9], 4);
311 
312  /* load 4 blocks of data - encrypt round */
313  if (f & AES_GCM_F_ENCRYPT)
314  {
315  d[0] = inv[0];
316  d[1] = inv[1];
317  d[2] = inv[2];
318  d[3] = inv[3];
319  }
320 
321  /* AES last round(s) */
322  aes_gcm_enc_last_round (r, d, rk, rounds, 4);
323 
324  /* store 4 blocks of data */
325  outv[0] = d[0];
326  outv[1] = d[1];
327  outv[2] = d[2];
328  outv[3] = d[3];
329 
330  /* load next 4 blocks of data - decrypt round */
331  if (f & AES_GCM_F_DECRYPT)
332  {
333  d[0] = inv[4];
334  d[1] = inv[5];
335  d[2] = inv[6];
336  d[3] = inv[7];
337  }
338 
339  /* GHASH multiply block 4 */
340  ghash_mul_next (gd, u8x16_reflect (d[0]), Hi[4]);
341 
342  /* AES rounds 0, 1 and 2 */
343  aes_gcm_enc_first_round (r, ctr, rk[0], 4);
344  aes_gcm_enc_round (r, rk[1], 4);
345  aes_gcm_enc_round (r, rk[2], 4);
346 
347  /* GHASH multiply block 5 */
348  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[5]);
349 
350  /* AES rounds 3 and 4 */
351  aes_gcm_enc_round (r, rk[3], 4);
352  aes_gcm_enc_round (r, rk[4], 4);
353 
354  /* GHASH multiply block 6 */
355  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[6]);
356 
357  /* AES rounds 5 and 6 */
358  aes_gcm_enc_round (r, rk[5], 4);
359  aes_gcm_enc_round (r, rk[6], 4);
360 
361  /* GHASH multiply block 7 */
362  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[7]);
363 
364  /* AES rounds 7 and 8 */
365  aes_gcm_enc_round (r, rk[7], 4);
366  aes_gcm_enc_round (r, rk[8], 4);
367 
368  /* GHASH reduce 1st step */
369  ghash_reduce (gd);
370 
371  /* AES round 9 */
372  aes_gcm_enc_round (r, rk[9], 4);
373 
374  /* load data - encrypt round */
375  if (f & AES_GCM_F_ENCRYPT)
376  {
377  d[0] = inv[4];
378  d[1] = inv[5];
379  d[2] = inv[6];
380  d[3] = inv[7];
381  }
382 
383  /* GHASH reduce 2nd step */
384  ghash_reduce2 (gd);
385 
386  /* AES last round(s) */
387  aes_gcm_enc_last_round (r, d, rk, rounds, 4);
388 
389  /* store data */
390  outv[4] = d[0];
391  outv[5] = d[1];
392  outv[6] = d[2];
393  outv[7] = d[3];
394 
395  /* GHASH final step */
396  return ghash_final (gd);
397 }
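/* aes_gcm_calc_double () is the steady-state path: it processes eight
   blocks (two batches of four counter values) and hashes eight blocks with
   a single GHASH reduction, amortizing the reduction cost compared with
   two aes_gcm_calc () calls. */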
398 
399 static_always_inline u8x16
400 aes_gcm_ghash_last (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d,
401  int n_blocks, int n_bytes)
402 {
403  ghash_data_t _gd, *gd = &_gd;
404  u8x16 *Hi = (u8x16 *) kd->Hi + NUM_HI - n_blocks;
405 
406  if (n_bytes)
407  d[n_blocks - 1] = aes_byte_mask (d[n_blocks - 1], n_bytes);
408 
409  ghash_mul_first (gd, u8x16_reflect (d[0]) ^ T, Hi[0]);
410  if (n_blocks > 1)
411  ghash_mul_next (gd, u8x16_reflect (d[1]), Hi[1]);
412  if (n_blocks > 2)
413  ghash_mul_next (gd, u8x16_reflect (d[2]), Hi[2]);
414  if (n_blocks > 3)
415  ghash_mul_next (gd, u8x16_reflect (d[3]), Hi[3]);
416  ghash_reduce (gd);
417  ghash_reduce2 (gd);
418  return ghash_final (gd);
419 }
420 #endif
421 
422 #ifdef __VAES__
423 static const u32x16 ctr_inv_1234 = {
424  0, 0, 0, 1 << 24, 0, 0, 0, 2 << 24, 0, 0, 0, 3 << 24, 0, 0, 0, 4 << 24,
425 };
426 
427 static const u32x16 ctr_inv_4444 = {
428  0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24
429 };
430 
431 static const u32x16 ctr_1234 = {
432  1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0,
433 };
434 
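/* In VAES builds Y4 packs four consecutive counter blocks into one 512-bit
   register: ctr_inv_1234 seeds them with offsets +1..+4 and ctr_inv_4444
   advances all four by 4, both expressed so the increment lands on the low
   byte of each big-endian counter; ctr_1234 supplies host-order values for
   the overflow path. */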
435 static_always_inline void
436 aes4_gcm_enc_first_round (u8x64 * r, aes_gcm_counter_t * ctr, u8x64 k, int n)
437 {
438  u8 last_byte = (u8) ctr->counter;
439  int i = 0;
440 
441  /* As the counter is stored in network byte order for performance reasons,
442  we increment only its least significant byte, except in the case where it
443  would overflow. As we process four 512-bit blocks in parallel except in the
444  last round, overflow can only happen when n == 4 */
445 
446  if (n == 4)
447  for (; i < 2; i++)
448  {
449  r[i] = k ^ (u8x64) ctr->Y4;
450  ctr->Y4 += ctr_inv_4444;
451  }
452 
453  if (n == 4 && PREDICT_TRUE (last_byte == 241))
454  {
455  u32x16 Yc, Yr = (u32x16) u8x64_reflect_u8x16 ((u8x64) ctr->Y4);
456 
457  for (; i < n; i++)
458  {
459  r[i] = k ^ (u8x64) ctr->Y4;
460  Yc = u32x16_splat (ctr->counter + 4 * (i + 1)) + ctr_1234;
461  Yr = (u32x16) u32x16_mask_blend (Yr, Yc, 0x1111);
462  ctr->Y4 = (u32x16) u8x64_reflect_u8x16 ((u8x64) Yr);
463  }
464  }
465  else
466  {
467  for (; i < n; i++)
468  {
469  r[i] = k ^ (u8x64) ctr->Y4;
470  ctr->Y4 += ctr_inv_4444;
471  }
472  }
473  ctr->counter += n * 4;
474 }
475 
476 static_always_inline void
477 aes4_gcm_enc_round (u8x64 * r, u8x64 k, int n_blocks)
478 {
479  for (int i = 0; i < n_blocks; i++)
480  r[i] = aes_enc_round_x4 (r[i], k);
481 }
482 
483 static_always_inline void
484 aes4_gcm_enc_last_round (u8x64 * r, u8x64 * d, u8x64 const *k,
485  int rounds, int n_blocks)
486 {
487 
488  /* additional rounds for AES-192 and AES-256 */
489  for (int i = 10; i < rounds; i++)
490  aes4_gcm_enc_round (r, k[i], n_blocks);
491 
492  for (int i = 0; i < n_blocks; i++)
493  d[i] ^= aes_enc_last_round_x4 (r[i], k[rounds]);
494 }
495 
496 static_always_inline u8x16
497 aes4_gcm_calc (u8x16 T, aes_gcm_key_data_t * kd, u8x64 * d,
498  aes_gcm_counter_t * ctr, u8x16u * in, u8x16u * out,
499  int rounds, int n, int last_4block_bytes, aes_gcm_flags_t f)
500 {
501  ghash4_data_t _gd, *gd = &_gd;
502  const u8x64 *rk = (u8x64 *) kd->Ke4;
503  int i, ghash_blocks, gc = 1;
504  u8x64u *Hi4, *inv = (u8x64u *) in, *outv = (u8x64u *) out;
505  u8x64 r[4];
506  u64 byte_mask = _bextr_u64 (-1LL, 0, last_4block_bytes);
507 
508  if (f & AES_GCM_F_ENCRYPT)
509  {
510  /* during encryption we either hash four 512-bit blocks from previous
511  round or we don't hash at all */
512  ghash_blocks = 4;
513  Hi4 = (u8x64u *) (kd->Hi + NUM_HI - ghash_blocks * 4);
514  }
515  else
516  {
517  /* during decryption we hash 1..4 512-bit blocks from the current round */
518  ghash_blocks = n;
519  int n_128bit_blocks = n * 4;
520  /* if this is last round of decryption, we may have less than 4
521  128-bit blocks in the last 512-bit data block, so we need to adjust
522  Hi4 pointer accordingly */
523  if (f & AES_GCM_F_LAST_ROUND)
524  n_128bit_blocks += ((last_4block_bytes + 15) >> 4) - 4;
525  Hi4 = (u8x64u *) (kd->Hi + NUM_HI - n_128bit_blocks);
526  }
527 
528  /* AES rounds 0 and 1 */
529  aes4_gcm_enc_first_round (r, ctr, rk[0], n);
530  aes4_gcm_enc_round (r, rk[1], n);
531 
532  /* load 4 blocks of data - decrypt round */
533  if (f & AES_GCM_F_DECRYPT)
534  {
535  for (i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
536  d[i] = inv[i];
537 
538  if (f & AES_GCM_F_LAST_ROUND)
539  d[i] = u8x64_mask_load (u8x64_splat (0), inv + i, byte_mask);
540  }
541 
542  /* GHASH multiply block 0 */
543  if (f & AES_GCM_F_WITH_GHASH)
544  ghash4_mul_first (gd, u8x64_reflect_u8x16 (d[0]) ^
545  u8x64_insert_u8x16 (u8x64_splat (0), T, 0), Hi4[0]);
546 
547  /* AES rounds 2 and 3 */
548  aes4_gcm_enc_round (r, rk[2], n);
549  aes4_gcm_enc_round (r, rk[3], n);
550 
551  /* GHASH multiply block 1 */
552  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
553  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[1]), Hi4[1]);
554 
555  /* AES rounds 4 and 5 */
556  aes4_gcm_enc_round (r, rk[4], n);
557  aes4_gcm_enc_round (r, rk[5], n);
558 
559  /* GHASH multiply block 2 */
560  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
561  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[2]), Hi4[2]);
562 
563  /* AES rounds 6 and 7 */
564  aes4_gcm_enc_round (r, rk[6], n);
565  aes4_gcm_enc_round (r, rk[7], n);
566 
567  /* GHASH multiply block 3 */
568  if ((f & AES_GCM_F_WITH_GHASH) && gc++ < ghash_blocks)
569  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[3]), Hi4[3]);
570 
571  /* load 4 blocks of data - encrypt round */
572  if (f & AES_GCM_F_ENCRYPT)
573  {
574  for (i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
575  d[i] = inv[i];
576 
577  if (f & AES_GCM_F_LAST_ROUND)
578  d[i] = u8x64_mask_load (u8x64_splat (0), inv + i, byte_mask);
579  }
580 
581  /* AES rounds 8 and 9 */
582  aes4_gcm_enc_round (r, rk[8], n);
583  aes4_gcm_enc_round (r, rk[9], n);
584 
585  /* AES last round(s) */
586  aes4_gcm_enc_last_round (r, d, rk, rounds, n);
587 
588  /* store 4 blocks of data */
589  for (i = 0; i < n - ((f & AES_GCM_F_LAST_ROUND) != 0); i++)
590  outv[i] = d[i];
591 
592  if (f & AES_GCM_F_LAST_ROUND)
593  u8x64_mask_store (d[i], outv + i, byte_mask);
594 
595  /* GHASH reduce 1st step */
596  ghash4_reduce (gd);
597 
598  /* GHASH reduce 2nd step */
599  ghash4_reduce2 (gd);
600 
601  /* GHASH final step */
602  return ghash4_final (gd);
603 }
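/* For the tail, last_4block_bytes selects a byte mask via _bextr_u64 () so
   the final 1..64 bytes move through masked 512-bit loads and stores, and
   the Hi4 pointer is rebased so every remaining 128-bit lane is still
   multiplied by the correct power of H. */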
604 
605 static_always_inline u8x16
606 aes4_gcm_calc_double (u8x16 T, aes_gcm_key_data_t * kd, u8x64 * d,
607  aes_gcm_counter_t * ctr, u8x16u * in, u8x16u * out,
608  int rounds, aes_gcm_flags_t f)
609 {
610  u8x64 r[4];
611  ghash4_data_t _gd, *gd = &_gd;
612  const u8x64 *rk = (u8x64 *) kd->Ke4;
613  u8x64 *Hi4 = (u8x64 *) (kd->Hi + NUM_HI - 32);
614  u8x64u *inv = (u8x64u *) in, *outv = (u8x64u *) out;
615 
616  /* AES rounds 0 and 1 */
617  aes4_gcm_enc_first_round (r, ctr, rk[0], 4);
618  aes4_gcm_enc_round (r, rk[1], 4);
619 
620  /* load 4 blocks of data - decrypt round */
621  if (f & AES_GCM_F_DECRYPT)
622  for (int i = 0; i < 4; i++)
623  d[i] = inv[i];
624 
625  /* GHASH multiply block 0 */
626  ghash4_mul_first (gd, u8x64_reflect_u8x16 (d[0]) ^
627  u8x64_insert_u8x16 (u8x64_splat (0), T, 0), Hi4[0]);
628 
629  /* AES rounds 2 and 3 */
630  aes4_gcm_enc_round (r, rk[2], 4);
631  aes4_gcm_enc_round (r, rk[3], 4);
632 
633  /* GHASH multiply block 1 */
634  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[1]), Hi4[1]);
635 
636  /* AES rounds 4 and 5 */
637  aes4_gcm_enc_round (r, rk[4], 4);
638  aes4_gcm_enc_round (r, rk[5], 4);
639 
640  /* GHASH multiply block 2 */
641  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[2]), Hi4[2]);
642 
643  /* AES rounds 6 and 7 */
644  aes4_gcm_enc_round (r, rk[6], 4);
645  aes4_gcm_enc_round (r, rk[7], 4);
646 
647  /* GHASH multiply block 3 */
648  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[3]), Hi4[3]);
649 
650  /* AES rounds 8 and 9 */
651  aes4_gcm_enc_round (r, rk[8], 4);
652  aes4_gcm_enc_round (r, rk[9], 4);
653 
654  /* load 4 blocks of data - encrypt round */
655  if (f & AES_GCM_F_ENCRYPT)
656  for (int i = 0; i < 4; i++)
657  d[i] = inv[i];
658 
659  /* AES last round(s) */
660  aes4_gcm_enc_last_round (r, d, rk, rounds, 4);
661 
662  /* store 4 blocks of data */
663  for (int i = 0; i < 4; i++)
664  outv[i] = d[i];
665 
666  /* load 4 blocks of data - decrypt round */
667  if (f & AES_GCM_F_DECRYPT)
668  for (int i = 0; i < 4; i++)
669  d[i] = inv[i + 4];
670 
671  /* GHASH multiply block 4 */
672  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[0]), Hi4[4]);
673 
674  /* AES rounds 0 and 1 */
675  aes4_gcm_enc_first_round (r, ctr, rk[0], 4);
676  aes4_gcm_enc_round (r, rk[1], 4);
677 
678  /* GHASH multiply block 5 */
679  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[1]), Hi4[5]);
680 
681  /* AES rounds 2 and 3 */
682  aes4_gcm_enc_round (r, rk[2], 4);
683  aes4_gcm_enc_round (r, rk[3], 4);
684 
685  /* GHASH multiply block 6 */
686  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[2]), Hi4[6]);
687 
688  /* AES rounds 4 and 5 */
689  aes4_gcm_enc_round (r, rk[4], 4);
690  aes4_gcm_enc_round (r, rk[5], 4);
691 
692  /* GHASH multiply block 7 */
693  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[3]), Hi4[7]);
694 
695  /* AES rounds 6 and 7 */
696  aes4_gcm_enc_round (r, rk[6], 4);
697  aes4_gcm_enc_round (r, rk[7], 4);
698 
699  /* GHASH reduce 1st step */
700  ghash4_reduce (gd);
701 
702  /* AES rounds 8 and 9 */
703  aes4_gcm_enc_round (r, rk[8], 4);
704  aes4_gcm_enc_round (r, rk[9], 4);
705 
706  /* GHASH reduce 2nd step */
707  ghash4_reduce2 (gd);
708 
709  /* load 4 blocks of data - encrypt round */
710  if (f & AES_GCM_F_ENCRYPT)
711  for (int i = 0; i < 4; i++)
712  d[i] = inv[i + 4];
713 
714  /* AES last round(s) */
715  aes4_gcm_enc_last_round (r, d, rk, rounds, 4);
716 
717  /* store 4 blocks of data */
718  for (int i = 0; i < 4; i++)
719  outv[i + 4] = d[i];
720 
721  /* GHASH final step */
722  return ghash4_final (gd);
723 }
724 
725 static_always_inline u8x16
726 aes4_gcm_ghash_last (u8x16 T, aes_gcm_key_data_t * kd, u8x64 * d,
727  int n, int last_4block_bytes)
728 {
729  ghash4_data_t _gd, *gd = &_gd;
730  u8x64u *Hi4;
731  int n_128bit_blocks;
732  u64 byte_mask = _bextr_u64 (-1LL, 0, last_4block_bytes);
733  n_128bit_blocks = (n - 1) * 4 + ((last_4block_bytes + 15) >> 4);
734  Hi4 = (u8x64u *) (kd->Hi + NUM_HI - n_128bit_blocks);
735 
736  d[n - 1] = u8x64_mask_blend (u8x64_splat (0), d[n - 1], byte_mask);
737  ghash4_mul_first (gd, u8x64_reflect_u8x16 (d[0]) ^
738  u8x64_insert_u8x16 (u8x64_splat (0), T, 0), Hi4[0]);
739  if (n > 1)
740  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[1]), Hi4[1]);
741  if (n > 2)
742  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[2]), Hi4[2]);
743  if (n > 3)
744  ghash4_mul_next (gd, u8x64_reflect_u8x16 (d[3]), Hi4[3]);
745  ghash4_reduce (gd);
746  ghash4_reduce2 (gd);
747  return ghash4_final (gd);
748 }
749 #endif
750 
751 static_always_inline u8x16
752 aes_gcm_enc (u8x16 T, aes_gcm_key_data_t * kd, aes_gcm_counter_t * ctr,
753  u8x16u * inv, u8x16u * outv, u32 n_left, int rounds)
754 {
755  aes_gcm_flags_t f = AES_GCM_F_ENCRYPT;
756 
757  if (n_left == 0)
758  return T;
759 
760 #if __VAES__
761  u8x64 d4[4];
762  if (n_left < 256)
763  {
764  f |= AES_GCM_F_LAST_ROUND;
765  if (n_left > 192)
766  {
767  n_left -= 192;
768  aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4, n_left, f);
769  return aes4_gcm_ghash_last (T, kd, d4, 4, n_left);
770  }
771  else if (n_left > 128)
772  {
773  n_left -= 128;
774  aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 3, n_left, f);
775  return aes4_gcm_ghash_last (T, kd, d4, 3, n_left);
776  }
777  else if (n_left > 64)
778  {
779  n_left -= 64;
780  aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 2, n_left, f);
781  return aes4_gcm_ghash_last (T, kd, d4, 2, n_left);
782  }
783  else
784  {
785  aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 1, n_left, f);
786  return aes4_gcm_ghash_last (T, kd, d4, 1, n_left);
787  }
788  }
789 
790  aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4, 0, f);
791 
792  /* next */
793  n_left -= 256;
794  outv += 16;
795  inv += 16;
796 
797  f |= AES_GCM_F_WITH_GHASH;
798 
799  while (n_left >= 512)
800  {
801  T = aes4_gcm_calc_double (T, kd, d4, ctr, inv, outv, rounds, f);
802 
803  /* next */
804  n_left -= 512;
805  outv += 32;
806  inv += 32;
807  }
808 
809  while (n_left >= 256)
810  {
811  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4, 0, f);
812 
813  /* next */
814  n_left -= 256;
815  outv += 16;
816  inv += 16;
817  }
818 
819  if (n_left == 0)
820  return aes4_gcm_ghash_last (T, kd, d4, 4, 64);
821 
822  f |= AES_GCM_F_LAST_ROUND;
823 
824  if (n_left > 192)
825  {
826  n_left -= 192;
827  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4, n_left, f);
828  return aes4_gcm_ghash_last (T, kd, d4, 4, n_left);
829  }
830 
831  if (n_left > 128)
832  {
833  n_left -= 128;
834  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 3, n_left, f);
835  return aes4_gcm_ghash_last (T, kd, d4, 3, n_left);
836  }
837 
838  if (n_left > 64)
839  {
840  n_left -= 64;
841  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 2, n_left, f);
842  return aes4_gcm_ghash_last (T, kd, d4, 2, n_left);
843  }
844 
845  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 1, n_left, f);
846  return aes4_gcm_ghash_last (T, kd, d4, 1, n_left);
847 #else
848  u8x16 d[4];
849  if (n_left < 64)
850  {
851  f |= AES_GCM_F_LAST_ROUND;
852  if (n_left > 48)
853  {
854  n_left -= 48;
855  aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, n_left, f);
856  return aes_gcm_ghash_last (T, kd, d, 4, n_left);
857  }
858  else if (n_left > 32)
859  {
860  n_left -= 32;
861  aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 3, n_left, f);
862  return aes_gcm_ghash_last (T, kd, d, 3, n_left);
863  }
864  else if (n_left > 16)
865  {
866  n_left -= 16;
867  aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 2, n_left, f);
868  return aes_gcm_ghash_last (T, kd, d, 2, n_left);
869  }
870  else
871  {
872  aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 1, n_left, f);
873  return aes_gcm_ghash_last (T, kd, d, 1, n_left);
874  }
875  }
876 
877  aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, 0, f);
878 
879  /* next */
880  n_left -= 64;
881  outv += 4;
882  inv += 4;
883 
884  f |= AES_GCM_F_WITH_GHASH;
885 
886  while (n_left >= 128)
887  {
888  T = aes_gcm_calc_double (T, kd, d, ctr, inv, outv, rounds, f);
889 
890  /* next */
891  n_left -= 128;
892  outv += 8;
893  inv += 8;
894  }
895 
896  if (n_left >= 64)
897  {
898  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, 0, f);
899 
900  /* next */
901  n_left -= 64;
902  outv += 4;
903  inv += 4;
904  }
905 
906  if (n_left == 0)
907  return aes_gcm_ghash_last (T, kd, d, 4, 0);
908 
909  f |= AES_GCM_F_LAST_ROUND;
910 
911  if (n_left > 48)
912  {
913  n_left -= 48;
914  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, n_left, f);
915  return aes_gcm_ghash_last (T, kd, d, 4, n_left);
916  }
917 
918  if (n_left > 32)
919  {
920  n_left -= 32;
921  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 3, n_left, f);
922  return aes_gcm_ghash_last (T, kd, d, 3, n_left);
923  }
924 
925  if (n_left > 16)
926  {
927  n_left -= 16;
928  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 2, n_left, f);
929  return aes_gcm_ghash_last (T, kd, d, 2, n_left);
930  }
931 
932  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 1, n_left, f);
933  return aes_gcm_ghash_last (T, kd, d, 1, n_left);
934 #endif
935 }
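/* In the encrypt direction GHASH runs one 4-block batch behind the cipher,
   since the ciphertext to be hashed only exists after the previous call;
   aes_gcm_ghash_last () flushes the final batch left in d / d4. */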
936 
937 static_always_inline u8x16
938 aes_gcm_dec (u8x16 T, aes_gcm_key_data_t * kd, aes_gcm_counter_t * ctr,
939  u8x16u * inv, u8x16u * outv, u32 n_left, int rounds)
940 {
941  aes_gcm_flags_t f = AES_GCM_F_WITH_GHASH | AES_GCM_F_DECRYPT;
942 #ifdef __VAES__
943  u8x64 d4[4] = { };
944 
945  while (n_left >= 512)
946  {
947  T = aes4_gcm_calc_double (T, kd, d4, ctr, inv, outv, rounds, f);
948 
949  /* next */
950  n_left -= 512;
951  outv += 32;
952  inv += 32;
953  }
954 
955  while (n_left >= 256)
956  {
957  T = aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4, 0, f);
958 
959  /* next */
960  n_left -= 256;
961  outv += 16;
962  inv += 16;
963  }
964 
965  if (n_left == 0)
966  return T;
967 
968  f |= AES_GCM_F_LAST_ROUND;
969 
970  if (n_left > 192)
971  return aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 4,
972  n_left - 192, f);
973  if (n_left > 128)
974  return aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 3,
975  n_left - 128, f);
976  if (n_left > 64)
977  return aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 2,
978  n_left - 64, f);
979  return aes4_gcm_calc (T, kd, d4, ctr, inv, outv, rounds, 1, n_left, f);
980 #else
981  u8x16 d[4];
982  while (n_left >= 128)
983  {
984  T = aes_gcm_calc_double (T, kd, d, ctr, inv, outv, rounds, f);
985 
986  /* next */
987  n_left -= 128;
988  outv += 8;
989  inv += 8;
990  }
991 
992  if (n_left >= 64)
993  {
994  T = aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, 0, f);
995 
996  /* next */
997  n_left -= 64;
998  outv += 4;
999  inv += 4;
1000  }
1001 
1002  if (n_left == 0)
1003  return T;
1004 
1005  f |= AES_GCM_F_LAST_ROUND;
1006 
1007  if (n_left > 48)
1008  return aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 4, n_left - 48, f);
1009 
1010  if (n_left > 32)
1011  return aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 3, n_left - 32, f);
1012 
1013  if (n_left > 16)
1014  return aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 2, n_left - 16, f);
1015 
1016  return aes_gcm_calc (T, kd, d, ctr, inv, outv, rounds, 1, n_left, f);
1017 #endif
1018 }
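/* Decryption hashes the ciphertext it is currently reading, so there is no
   lagging d / d4 state to flush and the last aes_gcm_calc () result can be
   returned directly. */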
1019 
1020 static_always_inline int
1021 aes_gcm (u8x16u * in, u8x16u * out, u8x16u * addt, u8x16u * iv, u8x16u * tag,
1022  u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
1023  int aes_rounds, int is_encrypt)
1024 {
1025  int i;
1026  u8x16 r, T = { };
1027  u32x4 Y0;
1028  ghash_data_t _gd, *gd = &_gd;
1029  aes_gcm_counter_t _ctr, *ctr = &_ctr;
1030 
1031  clib_prefetch_load (iv);
1032  clib_prefetch_load (in);
1033  clib_prefetch_load (in + 4);
1034 
1035  /* calculate ghash for AAD - optimized for ipsec common cases */
1036  if (aad_bytes == 8)
1037  T = aes_gcm_ghash (T, kd, addt, 8);
1038  else if (aad_bytes == 12)
1039  T = aes_gcm_ghash (T, kd, addt, 12);
1040  else
1041  T = aes_gcm_ghash (T, kd, addt, aad_bytes);
1042 
1043  /* initialize counter */
1044  ctr->counter = 1;
1045  Y0 = (u32x4) aes_load_partial (iv, 12) + ctr_inv_1;
1046 #ifdef __VAES__
1047  ctr->Y4 = u32x16_splat_u32x4 (Y0) + ctr_inv_1234;
1048 #else
1049  ctr->Y = Y0 + ctr_inv_1;
1050 #endif
1051 
1052  /* ghash and encrypt/decrypt */
1053  if (is_encrypt)
1054  T = aes_gcm_enc (T, kd, ctr, in, out, data_bytes, aes_rounds);
1055  else
1056  T = aes_gcm_dec (T, kd, ctr, in, out, data_bytes, aes_rounds);
1057 
1058  clib_prefetch_load (tag);
1059 
1060  /* Finalize ghash - data bytes and aad bytes converted to bits */
1061  /* *INDENT-OFF* */
1062  r = (u8x16) ((u64x2) {data_bytes, aad_bytes} << 3);
1063  /* *INDENT-ON* */
1064 
1065  /* interleaved computation of final ghash and E(Y0, k) */
1066  ghash_mul_first (gd, r ^ T, kd->Hi[NUM_HI - 1]);
1067  r = kd->Ke[0] ^ (u8x16) Y0;
1068  for (i = 1; i < 5; i += 1)
1069  r = aes_enc_round (r, kd->Ke[i]);
1070  ghash_reduce (gd);
1071  ghash_reduce2 (gd);
1072  for (; i < 9; i += 1)
1073  r = aes_enc_round (r, kd->Ke[i]);
1074  T = ghash_final (gd);
1075  for (; i < aes_rounds; i += 1)
1076  r = aes_enc_round (r, kd->Ke[i]);
1077  r = aes_enc_last_round (r, kd->Ke[aes_rounds]);
1078  T = u8x16_reflect (T) ^ r;
1079 
1080  /* tag_len 16 -> 0 */
1081  tag_len &= 0xf;
1082 
1083  if (is_encrypt)
1084  {
1085  /* store tag */
1086  if (tag_len)
1087  aes_store_partial (tag, T, tag_len);
1088  else
1089  tag[0] = T;
1090  }
1091  else
1092  {
1093  /* check tag */
1094  u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
1095  if ((u8x16_msb_mask (tag[0] == T) & tag_mask) != tag_mask)
1096  return 0;
1097  }
1098  return 1;
1099 }
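/* aes_gcm () ties the pieces together: T accumulates GHASH over the AAD
   and the ciphertext, the length block (bit lengths of data and AAD) is
   folded in, and the tag is E(K, Y0) XOR the byte-reflected T.  The final
   GHASH and the AES encryption of Y0 are interleaved above to hide
   latency; on decrypt the computed tag is compared with the supplied one
   and 0 is returned on mismatch. */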
1100 
1101 static_always_inline u32
1102 aes_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
1103  u32 n_ops, aes_key_size_t ks)
1104 {
1105  crypto_native_main_t *cm = &crypto_native_main;
1106  vnet_crypto_op_t *op = ops[0];
1107  aes_gcm_key_data_t *kd;
1108  u32 n_left = n_ops;
1109 
1110 
1111 next:
1112  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
1113  aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
1114  (u8x16u *) op->iv, (u8x16u *) op->tag, op->len, op->aad_len,
1115  op->tag_len, kd, AES_KEY_ROUNDS (ks), /* is_encrypt */ 1);
1116  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
1117 
1118  if (--n_left)
1119  {
1120  op += 1;
1121  goto next;
1122  }
1123 
1124  return n_ops;
1125 }
1126 
1127 static_always_inline u32
1128 aes_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
1129  aes_key_size_t ks)
1130 {
1131  crypto_native_main_t *cm = &crypto_native_main;
1132  vnet_crypto_op_t *op = ops[0];
1133  aes_gcm_key_data_t *kd;
1134  u32 n_left = n_ops;
1135  int rv;
1136 
1137 next:
1138  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
1139  rv = aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad,
1140  (u8x16u *) op->iv, (u8x16u *) op->tag, op->len,
1141  op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks),
1142  /* is_encrypt */ 0);
1143 
1144  if (rv)
1145  {
1146  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
1147  }
1148  else
1149  {
1150  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
1151  n_ops--;
1152  }
1153 
1154  if (--n_left)
1155  {
1156  op += 1;
1157  goto next;
1158  }
1159 
1160  return n_ops;
1161 }
1162 
1163 static_always_inline void *
1164 aes_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
1165 {
1166  aes_gcm_key_data_t *kd;
1167  u8x16 H;
1168 
1169  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
1170 
1171  /* expand AES key */
1172  aes_key_expand ((u8x16 *) kd->Ke, key->data, ks);
1173 
1174  /* pre-calculate H */
1175  H = aes_encrypt_block (u8x16_splat (0), kd->Ke, ks);
1176  H = u8x16_reflect (H);
1177  ghash_precompute (H, (u8x16 *) kd->Hi, NUM_HI);
1178 #ifdef __VAES__
1179  u8x64 *Ke4 = (u8x64 *) kd->Ke4;
1180  for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++)
1181  Ke4[i] = u8x64_splat_u8x16 (kd->Ke[i]);
1182 #endif
1183  return kd;
1184 }
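/* The hash key is derived as H = E (K, 0^128), byte-reflected into GHASH
   bit order, and ghash_precompute () stores the NUM_HI powers of H used by
   the batched GHASH code; VAES builds additionally broadcast each round
   key into Ke4. */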
1185 
1186 #define foreach_aes_gcm_handler_type _(128) _(192) _(256)
1187 
1188 #define _(x) \
1189 static u32 aes_ops_dec_aes_gcm_##x \
1190 (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
1191 { return aes_ops_dec_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
1192 static u32 aes_ops_enc_aes_gcm_##x \
1193 (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
1194 { return aes_ops_enc_aes_gcm (vm, ops, n_ops, AES_KEY_##x); } \
1195 static void * aes_gcm_key_exp_##x (vnet_crypto_key_t *key) \
1196 { return aes_gcm_key_exp (key, AES_KEY_##x); }
1197 
1198 foreach_aes_gcm_handler_type;
1199 #undef _
1200 
1201 clib_error_t *
1202 #ifdef __VAES__
1203 crypto_native_aes_gcm_init_icl (vlib_main_t * vm)
1204 #elif __AVX512F__
1205 crypto_native_aes_gcm_init_skx (vlib_main_t * vm)
1206 #elif __AVX2__
1207 crypto_native_aes_gcm_init_hsw (vlib_main_t * vm)
1208 #elif __aarch64__
1209 crypto_native_aes_gcm_init_neon (vlib_main_t * vm)
1210 #else
1211 crypto_native_aes_gcm_init_slm (vlib_main_t * vm)
1212 #endif
1213 {
1214  crypto_native_main_t *cm = &crypto_native_main;
1215 
1216 #define _(x) \
1217  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
1218  VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
1219  aes_ops_enc_aes_gcm_##x); \
1220  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
1221  VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
1222  aes_ops_dec_aes_gcm_##x); \
1223  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aes_gcm_key_exp_##x;
1224  foreach_aes_gcm_handler_type;
1225 #undef _
1226  return 0;
1227 }
1228 
1229 /*
1230  * fd.io coding-style-patch-verification: ON
1231  *
1232  * Local Variables:
1233  * eval: (c-set-style "gnu")
1234  * End:
1235  */