FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
cryptodev.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/vnet.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION 10240
#define CRYPTODEV_DEF_DRIVE crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
#define CRYPTODEV_DIGEST_OFFSET (offsetof (cryptodev_op_t, digest))

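/* These offsets are measured from the start of cryptodev_op_t.  Since the
 * rte_crypto_op is the first real member of that struct (see below), they
 * double as the op-relative offsets the cryptodev API expects: a PMD can
 * resolve the IV as rte_crypto_op_ctod_offset (op, uint8_t *,
 * CRYPTODEV_IV_OFFSET), and the field's IOVA is op->phys_addr plus the
 * same constant. */
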
/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)
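
/* Each tuple above reads (vnet crypto alg, cryptodev cipher alg, hash alg,
 * digest size in bytes); e.g. AES_128_CBC with SHA1 and a 12-byte truncated
 * HMAC, the combination IPsec calls AES-CBC + HMAC-SHA1-96.  The digest
 * size also becomes the TAG## suffix of the generated
 * VNET_CRYPTO_ALG_*_TAG* enum values matched in prepare_linked_xform (). */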

#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;

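/* A cryptodev_op_t bundles everything one request needs in a single
 * cache-line-aligned mempool object.  The rte_crypto_op must remain the
 * first member so a cryptodev_op_t pointer can be handed to
 * rte_cryptodev_enqueue_burst () unchanged; iv[] and aad[] then sit at the
 * fixed CRYPTODEV_IV_OFFSET/CRYPTODEV_AAD_OFFSET declared above.  frame
 * and n_elts are only filled in the first op of each frame (see the
 * enqueue functions below). */
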
typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

typedef struct
{
  struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
} cryptodev_key_t;

typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  struct rte_mempool *cop_pool;
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u16 cryptodev_id;
  u16 cryptodev_q;
  u32 inflight;
  cryptodev_op_t **cops;
  struct rte_ring *ring;
} cryptodev_engine_thread_t;

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;

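/* Threading model: one (device, queue-pair) tuple - an "instance" - is
 * owned by at most one worker (cryptodev_id/cryptodev_q in its per-thread
 * data), so enqueue/dequeue run without locks.  active_cdev_inst_mask and
 * tlock are only touched on the slow assignment path, and the mempools
 * are kept per numa node. */
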
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t * key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t * key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d) \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
    cipher_algo = RTE_CRYPTO_CIPHER_##b; \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
    digest_len = d; \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
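
/* Note the chaining order chosen above: for encryption the cipher xform
 * is first and the auth xform generates the digest over the ciphertext
 * (encrypt-then-MAC); for decryption the auth xform is first so the
 * digest is verified before the payload is decrypted. */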

static int
cryptodev_session_create (vnet_crypto_key_t * const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t * session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  /* propagate the failure so the caller can tear the half-built key down */
  if (ret)
    return ret;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    dev_id = dev_inst->dev_id;
    cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already configured for the driver type, avoid
       configuring it again to increase the session data's refcnt */
    if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	session_pair->keys[1]->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					  xforms_enc, sess_priv_pool);
    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					  xforms_dec, sess_priv_pool);
    if (ret < 0)
      return ret;
  }
  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
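
/* Each cryptodev_key_t carries one session per direction (keys[ENCRYPT]
 * and keys[DECRYPT]).  A session is initialized once per distinct driver
 * id rather than per device, and the AAD length it was built with is
 * stashed in opaque_data so the GCM enqueue path can detect a mismatch
 * and rebuild the session. */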

static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f) \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      ckey->keys[0] = 0;
      ckey->keys[1] = 0;
      pool_put (cmt->keys, ckey);
      return;
    }
  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);

      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      ckey->keys[0] = 0;
      ckey->keys[1] = 0;
    }
  else				/* create key */
    pool_get_zero (cmt->keys, ckey);

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[0])
    {
      ret = -1;
      goto clear_key;
    }

  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[1])
    {
      ret = -1;
      goto clear_key;
    }

  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      memset (ckey, 0, sizeof (*ckey));
      pool_put (cmt->keys, ckey);
    }
}

/*static*/ void
cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
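
/* Newly created keys default to an 8-byte AAD, e.g. an IPsec ESP header
 * without extended sequence numbers; when a frame later arrives with a
 * different AAD length (such as 12 bytes with ESN),
 * cryptodev_frame_gcm_enqueue () re-creates the session through a
 * VNET_CRYPTO_KEY_OP_MODIFY on this same path. */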

static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}

/* when a vlib_buffer in a chain is adjusted, its mbuf is not adjusted along
 * with it; this function syncs the mbuf chain and returns the digest's IOVA */
static_always_inline rte_iova_t
cryptodev_validate_mbuf_chain (vlib_main_t * vm, struct rte_mbuf *mb,
			       vlib_buffer_t * b, u8 * digest)
{
  rte_iova_t digest_iova = 0;
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */

  first_mb->nb_segs = 1;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];

      if (b->data <= digest &&
	  b->data + b->current_data + b->current_length > digest)
	digest_iova = rte_pktmbuf_iova (mb) + digest -
	  rte_pktmbuf_mtod (mb, u8 *);
    }

  return digest_iova;
}
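
/* Besides syncing each segment's length and data offset with its
 * vlib_buffer_t, the walk above locates the digest: with chained buffers
 * the digest can live in any segment, so its IOVA must be computed from
 * the segment that actually contains it, not from the head mbuf. */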

static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
				     vnet_crypto_async_frame_t * frame,
				     cryptodev_op_type_t op_type,
				     u32 digest_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  cryptodev_key_t *key;
  u32 last_key_index;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  key = pool_elt_at_index (cmt->keys, fe->key_index);
  last_key_index = fe->key_index;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->integ_start_offset);
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = key->keys[op_type];
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      if (PREDICT_TRUE (!(fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)))
	sop->auth.digest.phys_addr = rte_pktmbuf_iova (sop->m_src) +
	  fe->digest - rte_pktmbuf_mtod (sop->m_src, u8 *);
      else
	sop->auth.digest.phys_addr =
	  cryptodev_validate_mbuf_chain (vm, sop->m_src, b, fe->digest);
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
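
/* Frame bookkeeping: only the first op of the frame records the frame
 * pointer and element count.  The design relies on the queue pair being
 * owned by a single thread and on ops completing in enqueue order, so
 * the dequeue side can treat the completion stream as a FIFO of whole
 * frames. */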

static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
			     vnet_crypto_async_frame_t * frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  cryptodev_key_t *key;
  u32 last_key_index;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;
  frame->state = VNET_CRYPTO_OP_STATUS_COMPLETED;

  key = pool_elt_at_index (cmt->keys, fe->key_index);
  last_key_index = fe->key_index;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  u8 sess_aad_len;
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
	  if (PREDICT_FALSE (sess_aad_len != aad_len))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
				      fe->key_index, aad_len);
	    }
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = key->keys[op_type];
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      if (PREDICT_TRUE (!(fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)))
	sop->aead.digest.phys_addr = rte_pktmbuf_iova (sop->m_src) +
	  fe->tag - rte_pktmbuf_mtod (sop->m_src, u8 *);
      else
	sop->aead.digest.phys_addr =
	  cryptodev_validate_mbuf_chain (vm, sop->m_src, b, fe->tag);
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
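
/* The 12-byte IV and the AAD are copied into the op itself instead of
 * being referenced in place: the cryptodev API wants both at a fixed,
 * IOVA-resolvable location, and cop->op.phys_addr + CRYPTODEV_AAD_OFFSET
 * provides that without any per-packet address translation. */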

static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring * ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}
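
/* This peeks at the oldest queued op without consuming it.  It reaches
 * into the rte_ring internals - the object table following the ring
 * header, indexed by cons.head masked with the power-of-two size mask -
 * rather than using a public DPDK accessor, so it is tied to the ring
 * layout of this DPDK version. */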

static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t *cop0, **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0;	/* sum of status */

  if (cet->inflight)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE * 2);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  cop0 = cryptodev_get_ring_head (cet->ring);
  /* not a single frame is finished */
  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop0->frame;
  n_elts = cop0->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);

  return frame;
}
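
/* Dequeue is two-staged: completions are first drained from the PMD into
 * the per-thread ring, and a frame is only returned to vnet once the
 * ring holds at least the n_elts recorded in its head op.  OR-ing the
 * per-element statuses into ss0..ss3 lets one compare at the end decide
 * whether every element completed successfully. */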

/* *INDENT-OFF* */
#define _(a, b, c, d, e, f) \
static_always_inline int \
cryptodev_enqueue_##a##_AAD##f##_enc (vlib_main_t * vm, \
				      vnet_crypto_async_frame_t * frame) \
{ \
  return cryptodev_frame_gcm_enqueue (vm, frame, \
				      CRYPTODEV_OP_TYPE_ENCRYPT, f); \
} \
static_always_inline int \
cryptodev_enqueue_##a##_AAD##f##_dec (vlib_main_t * vm, \
				      vnet_crypto_async_frame_t * frame) \
{ \
  return cryptodev_frame_gcm_enqueue (vm, frame, \
				      CRYPTODEV_OP_TYPE_DECRYPT, f); \
}

foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
static_always_inline int \
cryptodev_enqueue_##a##_##c##_TAG##d##_enc (vlib_main_t * vm, \
					    vnet_crypto_async_frame_t * frame) \
{ \
  return cryptodev_frame_linked_algs_enqueue (vm, frame, \
					      CRYPTODEV_OP_TYPE_ENCRYPT, d); \
} \
static_always_inline int \
cryptodev_enqueue_##a##_##c##_TAG##d##_dec (vlib_main_t * vm, \
					    vnet_crypto_async_frame_t * frame) \
{ \
  return cryptodev_frame_linked_algs_enqueue (vm, frame, \
					      CRYPTODEV_OP_TYPE_DECRYPT, d); \
}

foreach_cryptodev_link_async_alg
#undef _

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assign resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;
      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
	cinst = cmt->cryptodev_inst + idx;
	if (cinst->dev_id == cet->cryptodev_id &&
	    cinst->q_id == cet->cryptodev_q)
	  break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}

static u8 *
format_cryptodev_inst (u8 * s, va_list * args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
	s = format (s, "%u (%v)\n", thread_index,
		    vlib_worker_threads[thread_index].name);
	break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
			     vlib_cli_command_t * cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "assign crypto resource for master thread");
      return error;
    }

  if (thread_index >= vec_len (cmt->per_thread_data) ||
      inst_index >= vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
      "resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};

static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_##b##_##c; \
  else \
    { \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
	return -RTE_CRYPTO_##b##_##c; \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_CIPHER_##b; \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _
  return 0;
}

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is on a different numa "
			"node than %u, ignored", info.device->name, numa);
	  continue;
	}
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  rte_cryptodev_info_get (cryptodev_id, &info);

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  /** If the device is already started, we reuse it, otherwise configure
   *  both the device and queue pair.
   **/
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  int ret;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;
      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}

static int
cryptodev_create_device (vlib_main_t *vm, u32 n_queues)
{
  char name[RTE_CRYPTODEV_NAME_MAX_LEN], args[128];
  u32 dev_id = 0;
  int ret;

  /* find an unused name to create the device */
  while (dev_id < RTE_CRYPTO_MAX_DEVS)
    {
      snprintf (name, RTE_CRYPTODEV_NAME_MAX_LEN - 1, "%s%u",
		RTE_STR (CRYPTODEV_DEF_DRIVE), dev_id);
      if (rte_cryptodev_get_dev_id (name) < 0)
	break;
      dev_id++;
    }

  if (dev_id == RTE_CRYPTO_MAX_DEVS)
    return -1;

  snprintf (args, 127, "socket_id=%u,max_nb_queue_pairs=%u",
	    vm->numa_node, n_queues);

  ret = rte_vdev_init (name, args);
  if (ret < 0)
    return ret;

  clib_warning ("Created cryptodev device %s (%s)", name, args);

  return 0;
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* create an AESNI_MB PMD so the service is available */
  if (n_queues < n_workers)
    {
      u32 q_num = max_pow2 (n_workers - n_queues);
      ret = cryptodev_create_device (vm, q_num);
      if (ret < 0)
	return ret;
    }

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  return 0;
}
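
/* cryptodev_probe () guarantees at least one queue pair per worker: if
 * the hardware comes up short, a software CRYPTODEV_DEF_DRIVE vdev is
 * created with the deficit rounded up to a power of two, after which
 * every discovered device is configured and its queue pairs registered
 * as assignable instances. */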

static int
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;
  int ret;

  if (rte_cryptodev_count () == 0)
    {
      clib_warning ("No cryptodev device available, creating...");
      ret = cryptodev_create_device (vm, max_pow2 (n_workers));
      if (ret < 0)
	{
	  clib_warning ("Failed");
	  return ret;
	}
    }

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}

static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}

static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
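
/* Runs once per object while the op mempool is being populated: the
 * session type, symmetric op type and the object's IOVA are precomputed
 * here so the per-packet enqueue path never needs to call
 * rte_mempool_virt2iova (). */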

clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u64 n_cop_elts;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_crypto_op_pool_private *priv;

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    {
      error = clib_error_return (0, "Not enough cryptodevs");
      return error;
    }

  /* A total of 4 times n_worker threads * frame size as crypto ops */
  n_cop_elts = max_pow2 (n_workers * CRYPTODEV_NB_CRYPTO_OPS);

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_sess_pool_%u", numa);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION,
					      0, 0, 0, numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);

  numa_data->sess_pool = mp;

  /* create session private pool for the numa node */
  name = format (0, "cryptodev_sess_pool_%u", numa);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      vec_free (name);
      goto err_handling;
    }

  vec_free (name);

  numa_data->sess_priv_pool = mp;

  /* create cryptodev op pool */
  name = format (0, "cryptodev_op_pool_%u", numa);

  mp = rte_mempool_create ((char *) name, n_cop_elts,
			   sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
			   sizeof (struct rte_crypto_op_pool_private), NULL,
			   NULL, crypto_op_init, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      vec_free (name);
      goto err_handling;
    }

  priv = rte_mempool_get_priv (mp);
  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  vec_free (name);
  numa_data->cop_pool = mp;

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      name = format (0, "frames_ring_%u", i);
      ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
				   vm->numa_node,
				   RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!ptd->ring)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}
      vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
      vec_free (name);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
	cryptodev_enqueue_##a##_AAD##f##_enc, \
	cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
	cryptodev_enqueue_##a##_AAD##f##_dec, \
	cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
	cryptodev_enqueue_##a##_##c##_TAG##d##_enc, \
	cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
	cryptodev_enqueue_##a##_##c##_TAG##d##_dec, \
	cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
Definition: cryptodev.c:135