FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
output.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <rdma/rdma.h>

#define RDMA_TX_RETRIES 5

#define RDMA_TXQ_DV_DSEG_SZ(txq) (RDMA_MLX5_WQE_DS * RDMA_TXQ_DV_SQ_SZ(txq))
#define RDMA_TXQ_DV_DSEG2WQE(d) (((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)
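
/*
 * Editor's note (illustrative, not in the original file): with
 * RDMA_MLX5_WQE_DS == 4 (four 16B data segments per 64B WQEBB),
 * RDMA_TXQ_DV_DSEG2WQE() is a ceiling division from data segments to
 * WQEBBs, e.g. RDMA_TXQ_DV_DSEG2WQE (1) == RDMA_TXQ_DV_DSEG2WQE (4) == 1
 * and RDMA_TXQ_DV_DSEG2WQE (5) == 2.
 */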

/*
 * MLX5 direct verbs tx/free functions
 */

static_always_inline void
rdma_device_output_free_mlx5 (vlib_main_t * vm,
			      const vlib_node_runtime_t * node,
			      rdma_txq_t * txq)
{
  u16 idx = txq->dv_cq_idx;
  u32 cq_mask = pow2_mask (txq->dv_cq_log2sz);
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u32 buf_sz = RDMA_TXQ_BUF_SZ (txq);
  u32 log2_cq_sz = txq->dv_cq_log2sz;
  struct mlx5_cqe64 *cqes = txq->dv_cq_cqes, *cur = cqes + (idx & cq_mask);
  u8 op_own, saved;
  const rdma_mlx5_wqe_t *wqe;

  for (;;)
    {
      op_own = *(volatile u8 *) &cur->op_own;
      if (((idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK) !=
	  (op_own & MLX5_CQE_OWNER_MASK) || (op_own >> 4) == MLX5_CQE_INVALID)
	break;
      if (PREDICT_FALSE ((op_own >> 4) != MLX5_CQE_REQ))
	vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      idx++;
      cur = cqes + (idx & cq_mask);
    }

  if (idx == txq->dv_cq_idx)
    return;			/* nothing to do */

  cur = cqes + ((idx - 1) & cq_mask);
  saved = cur->op_own;
  (void) saved;
  cur->op_own = 0xf0;
  txq->dv_cq_idx = idx;

  /* retrieve original WQE and get new tail counter */
  wqe = txq->dv_sq_wqes + (be16toh (cur->wqe_counter) & sq_mask);
  if (PREDICT_FALSE (wqe->ctrl.imm == RDMA_TXQ_DV_INVALID_ID))
    return;			/* can happen if CQE reports error for an intermediate WQE */

  ASSERT (RDMA_TXQ_USED_SZ (txq->head, wqe->ctrl.imm) <= buf_sz &&
	  RDMA_TXQ_USED_SZ (wqe->ctrl.imm, txq->tail) < buf_sz);

  /* free sent buffers and update txq head */
  vlib_buffer_free_from_ring (vm, txq->bufs, txq->head & mask, buf_sz,
			      RDMA_TXQ_USED_SZ (txq->head, wqe->ctrl.imm));
  txq->head = wqe->ctrl.imm;

  /* ring doorbell */
  CLIB_MEMORY_STORE_BARRIER ();
  txq->dv_cq_dbrec[0] = htobe32 (idx);
}
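
/*
 * Editor's note (illustrative, not in the original file): CQE validity is
 * tracked with an owner bit that flips on each pass over the completion
 * ring. The loop above compares the pass parity
 * (idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK with the owner bit stored in
 * op_own: e.g. with a 4-entry CQ, indices 0-3 expect parity 0 and indices
 * 4-7 expect parity 1, so a stale CQE left over from the previous pass is
 * never consumed twice.
 */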

static_always_inline void
rdma_device_output_tx_mlx5_doorbell (rdma_txq_t * txq, rdma_mlx5_wqe_t * last,
				     const u16 tail, u32 sq_mask)
{
  last->ctrl.imm = tail;	/* register item to free */
  last->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;	/* generate a CQE so we can free buffers */

  ASSERT (tail != txq->tail &&
	  RDMA_TXQ_AVAIL_SZ (txq, txq->head, txq->tail) >=
	  RDMA_TXQ_USED_SZ (txq->tail, tail));

  CLIB_MEMORY_STORE_BARRIER ();
  txq->dv_sq_dbrec[MLX5_SND_DBR] = htobe32 (tail);
  CLIB_COMPILER_BARRIER ();
  txq->dv_sq_db[0] = *(u64 *) (txq->dv_sq_wqes + (txq->tail & sq_mask));
}
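
/*
 * Editor's note (illustrative, not in the original file): the barriers in
 * the doorbell sequence are ordering-critical. CLIB_MEMORY_STORE_BARRIER()
 * makes all WQE stores globally visible before the doorbell record
 * (dv_sq_dbrec) is updated, and CLIB_COMPILER_BARRIER() keeps the compiler
 * from moving the final 64-bit copy of the first WQE bytes into the
 * doorbell register (dv_sq_db) ahead of the record update; otherwise the
 * NIC could fetch a WQE that is still being written.
 */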

static_always_inline void
rdma_mlx5_wqe_init (rdma_mlx5_wqe_t * wqe, const void *tmpl,
		    vlib_buffer_t * b, const u16 tail)
{
  u16 sz = b->current_length;
  const void *cur = vlib_buffer_get_current (b);
  uword addr = pointer_to_uword (cur);

  clib_memcpy_fast (wqe, tmpl, RDMA_MLX5_WQE_SZ);
  /* speculatively copy at least MLX5_ETH_L2_INLINE_HEADER_SIZE (18 bytes) */
  STATIC_ASSERT (STRUCT_SIZE_OF (struct mlx5_wqe_eth_seg, inline_hdr_start) +
		 STRUCT_SIZE_OF (struct mlx5_wqe_eth_seg,
				 inline_hdr) >=
		 MLX5_ETH_L2_INLINE_HEADER_SIZE, "wrong size");
  clib_memcpy_fast (wqe->eseg.inline_hdr_start, cur,
		    MLX5_ETH_L2_INLINE_HEADER_SIZE);

  wqe->wqe_index_lo = tail;
  wqe->wqe_index_hi = tail >> 8;
  if (PREDICT_TRUE (sz >= MLX5_ETH_L2_INLINE_HEADER_SIZE))
    {
      /* inline_hdr_sz is set to MLX5_ETH_L2_INLINE_HEADER_SIZE
         in the template */
      wqe->dseg.byte_count = htobe32 (sz - MLX5_ETH_L2_INLINE_HEADER_SIZE);
      wqe->dseg.addr = htobe64 (addr + MLX5_ETH_L2_INLINE_HEADER_SIZE);
    }
  else
    {
      /* dseg.byte_count and dseg.addr are set to 0 in the template */
      wqe->eseg.inline_hdr_sz = htobe16 (sz);
    }
}
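
/*
 * Editor's worked example (not in the original file): for a 64B packet,
 * the first 18B (MLX5_ETH_L2_INLINE_HEADER_SIZE) are inlined into the
 * ethernet segment and the data segment covers the remaining 46B:
 *   wqe->dseg.byte_count = htobe32 (64 - 18);
 *   wqe->dseg.addr       = htobe64 (addr + 18);
 * A runt packet shorter than 18B is instead fully inlined by shrinking
 * eseg.inline_hdr_sz to the packet length, leaving the zeroed template
 * data segment untouched.
 */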

/*
 * specific data path for chained buffers, supporting ring wrap-around
 * contrary to the normal path - otherwise we may fail to enqueue chained
 * buffers because we are close to the end of the ring while we still have
 * plenty of descriptors available
 */
static_always_inline u32
rdma_device_output_tx_mlx5_chained (vlib_main_t * vm,
				    const vlib_node_runtime_t * node,
				    const rdma_device_t * rd,
				    rdma_txq_t * txq, u32 n_left_from, u32 n,
				    u32 * bi, vlib_buffer_t ** b,
				    rdma_mlx5_wqe_t * wqe, u16 tail)
{
  rdma_mlx5_wqe_t *last = wqe;
  u32 wqe_n = RDMA_TXQ_AVAIL_SZ (txq, txq->head, tail);
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u32 dseg_mask = RDMA_TXQ_DV_DSEG_SZ (txq) - 1;
  const u32 lkey = wqe[0].dseg.lkey;

  vlib_buffer_copy_indices (txq->bufs + (txq->tail & mask), bi,
			    n_left_from - n);

  while (n >= 1 && wqe_n >= 1)
    {
      u32 *bufs = txq->bufs + (tail & mask);
      rdma_mlx5_wqe_t *wqe = txq->dv_sq_wqes + (tail & sq_mask);

      /* setup the head WQE */
      rdma_mlx5_wqe_init (wqe, txq->dv_wqe_tmpl, b[0], tail);

      bufs[0] = bi[0];

      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  /*
	   * max number of available dseg:
	   *  - 4 dseg per WQEBB available
	   *  - max 32 dseg per WQE (5-bit length field in WQE ctrl)
	   */
#define RDMA_MLX5_WQE_DS_MAX	(1 << 5)
	  const u32 dseg_max =
	    clib_min (RDMA_MLX5_WQE_DS * (wqe_n - 1), RDMA_MLX5_WQE_DS_MAX);
	  vlib_buffer_t *chained_b = b[0];
	  u32 chained_n = 0;

	  /* there are exactly 4 dseg per WQEBB and we rely on that */
	  STATIC_ASSERT (RDMA_MLX5_WQE_DS *
			 sizeof (struct mlx5_wqe_data_seg) ==
			 MLX5_SEND_WQE_BB, "wrong size");

	  /*
	   * iterate over fragments, supporting ring wrap-around contrary to
	   * the normal path - otherwise we may fail to enqueue chained
	   * buffers because we are close to the end of the ring while we
	   * still have plenty of descriptors available
	   */
	  while (chained_n < dseg_max
		 && chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      struct mlx5_wqe_data_seg *dseg = (void *) txq->dv_sq_wqes;
	      dseg += ((tail + 1) * RDMA_MLX5_WQE_DS + chained_n) & dseg_mask;
	      if (((clib_address_t) dseg & (MLX5_SEND_WQE_BB - 1)) == 0)
		{
		  /*
		   * start of new WQEBB
		   * head/tail are shared between buffers and descriptors
		   * In order to maintain 1:1 correspondence between
		   * buffer index and descriptor index, we build
		   * 4-fragment chains and save the head
		   */
		  chained_b->flags &= ~(VLIB_BUFFER_NEXT_PRESENT |
					VLIB_BUFFER_TOTAL_LENGTH_VALID);
		  u32 idx = tail + 1 + RDMA_TXQ_DV_DSEG2WQE (chained_n);
		  idx &= mask;
		  txq->bufs[idx] = chained_b->next_buffer;
		}

	      chained_b = vlib_get_buffer (vm, chained_b->next_buffer);
	      dseg->byte_count = htobe32 (chained_b->current_length);
	      dseg->lkey = lkey;
	      dseg->addr = htobe64 (vlib_buffer_get_current_va (chained_b));

	      chained_n += 1;
	    }

	  if (chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      /*
	       * no descriptors left: drop the chain including 1st WQE
	       * skip the problematic packet and continue
	       */
	      vlib_buffer_free_from_ring (vm, txq->bufs, tail & mask,
					  RDMA_TXQ_BUF_SZ (txq), 1 +
					  RDMA_TXQ_DV_DSEG2WQE (chained_n));
	      vlib_error_count (vm, node->node_index,
				dseg_max == chained_n ?
				RDMA_TX_ERROR_SEGMENT_SIZE_EXCEEDED :
				RDMA_TX_ERROR_NO_FREE_SLOTS, 1);

	      /* fixup tail to overwrite wqe head with next packet */
	      tail -= 1;
	    }
	  else
	    {
	      /* update WQE descriptor with new dseg number */
	      ((u8 *) & wqe[0].ctrl.qpn_ds)[3] = RDMA_MLX5_WQE_DS + chained_n;

	      tail += RDMA_TXQ_DV_DSEG2WQE (chained_n);
	      wqe_n -= RDMA_TXQ_DV_DSEG2WQE (chained_n);

	      last = wqe;
	    }
	}
      else
	{
	  /* not chained */
	  last = wqe;
	}

      tail += 1;
      bi += 1;
      b += 1;
      wqe_n -= 1;
      n -= 1;
    }

  if (n == n_left_from)
    return 0;			/* we failed to enqueue even a single packet */

  rdma_device_output_tx_mlx5_doorbell (txq, last, tail, sq_mask);
  return n_left_from - n;
}
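
/*
 * Editor's worked example (not in the original file): assuming
 * RDMA_MLX5_WQE_DS == 4, a 3-fragment chain enqueued at tail == 10 uses
 * the head WQEBB at slot 10 (ctrl + eseg + first dseg) plus chained_n == 2
 * extra data segments starting at dseg index (10 + 1) * 4, i.e.
 * RDMA_TXQ_DV_DSEG2WQE (2) == 1 additional WQEBB. The DS field in the
 * ctrl segment becomes RDMA_MLX5_WQE_DS + 2 == 6 and tail advances by 2
 * in total (head WQEBB plus one chained WQEBB).
 */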

static_always_inline u32
rdma_device_output_tx_mlx5 (vlib_main_t * vm,
			    const vlib_node_runtime_t * node,
			    const rdma_device_t * rd, rdma_txq_t * txq,
			    const u32 n_left_from, u32 * bi,
			    vlib_buffer_t ** b)
{
  u32 sq_mask = pow2_mask (txq->dv_sq_log2sz);
  u32 mask = pow2_mask (txq->bufs_log2sz);
  rdma_mlx5_wqe_t *wqe;
  u32 n, n_wrap;
  u16 tail = txq->tail;

  ASSERT (RDMA_TXQ_BUF_SZ (txq) <= RDMA_TXQ_DV_SQ_SZ (txq));

  /* avoid wrap-around logic in core loop */
  n = clib_min (n_left_from, RDMA_TXQ_BUF_SZ (txq) - (tail & mask));
  n_wrap = n_left_from - n;

wrap_around:
  wqe = txq->dv_sq_wqes + (tail & sq_mask);

  while (n >= 8)
    {
      u32 flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      if (PREDICT_FALSE (flags & VLIB_BUFFER_NEXT_PRESENT))
	return rdma_device_output_tx_mlx5_chained (vm, node, rd, txq,
						   n_left_from, n, bi, b, wqe,
						   tail);

      vlib_prefetch_buffer_header (b[4], LOAD);
      rdma_mlx5_wqe_init (wqe + 0, txq->dv_wqe_tmpl, b[0], tail + 0);

      vlib_prefetch_buffer_header (b[5], LOAD);
      rdma_mlx5_wqe_init (wqe + 1, txq->dv_wqe_tmpl, b[1], tail + 1);

      vlib_prefetch_buffer_header (b[6], LOAD);
      rdma_mlx5_wqe_init (wqe + 2, txq->dv_wqe_tmpl, b[2], tail + 2);

      vlib_prefetch_buffer_header (b[7], LOAD);
      rdma_mlx5_wqe_init (wqe + 3, txq->dv_wqe_tmpl, b[3], tail + 3);

      b += 4;
      tail += 4;
      wqe += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT))
	return rdma_device_output_tx_mlx5_chained (vm, node, rd, txq,
						   n_left_from, n, bi, b, wqe,
						   tail);

      rdma_mlx5_wqe_init (wqe, txq->dv_wqe_tmpl, b[0], tail);

      b += 1;
      tail += 1;
      wqe += 1;
      n -= 1;
    }

  if (n_wrap)
    {
      n = n_wrap;
      n_wrap = 0;
      goto wrap_around;
    }

  rdma_device_output_tx_mlx5_doorbell (txq, &wqe[-1], tail, sq_mask);
  return n_left_from;
}
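
/*
 * Editor's worked example (not in the original file): the wrap-around
 * split keeps the core loops free of per-WQE masking. With a 1024-slot
 * ring, tail == 1020 and 16 packets to send, the first batch is
 * n == clib_min (16, 1024 - 1020) == 4 (slots 1020-1023) and
 * n_wrap == 12; after the first pass the code jumps back to wrap_around
 * and writes the remaining 12 WQEs from slot 0.
 */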

/*
 * standard ibverb tx/free functions
 */

static_always_inline void
rdma_device_output_free_ibverb (vlib_main_t * vm,
				const vlib_node_runtime_t * node,
				rdma_txq_t * txq)
{
  struct ibv_wc wc[VLIB_FRAME_SIZE];
  u32 mask = pow2_mask (txq->bufs_log2sz);
  u16 tail;
  int n;

  n = ibv_poll_cq (txq->ibv_cq, VLIB_FRAME_SIZE, wc);
  if (n <= 0)
    {
      if (PREDICT_FALSE (n < 0))
	vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      return;
    }

  while (PREDICT_FALSE (IBV_WC_SUCCESS != wc[n - 1].status))
    {
      vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_COMPLETION, 1);
      n--;
      if (0 == n)
	return;
    }

  tail = wc[n - 1].wr_id;
  vlib_buffer_free_from_ring (vm, txq->bufs, txq->head & mask,
			      RDMA_TXQ_BUF_SZ (txq),
			      RDMA_TXQ_USED_SZ (txq->head, tail));
  txq->head = tail;
}

static_always_inline u32
rdma_device_output_tx_ibverb (vlib_main_t * vm,
			      const vlib_node_runtime_t * node,
			      const rdma_device_t * rd, rdma_txq_t * txq,
			      u32 n_left_from, u32 * bi, vlib_buffer_t ** b)
{
  struct ibv_send_wr wr[VLIB_FRAME_SIZE], *w = wr;
  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
  u32 n = n_left_from;

  while (n >= 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      vlib_prefetch_buffer_header (b[5], LOAD);
      s[1].addr = vlib_buffer_get_current_va (b[1]);
      s[1].length = b[1]->current_length;
      s[1].lkey = rd->lkey;

      vlib_prefetch_buffer_header (b[6], LOAD);
      s[2].addr = vlib_buffer_get_current_va (b[2]);
      s[2].length = b[2]->current_length;
      s[2].lkey = rd->lkey;

      vlib_prefetch_buffer_header (b[7], LOAD);
      s[3].addr = vlib_buffer_get_current_va (b[3]);
      s[3].length = b[3]->current_length;
      s[3].lkey = rd->lkey;

      clib_memset_u8 (&w[0], 0, sizeof (w[0]));
      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      clib_memset_u8 (&w[1], 0, sizeof (w[1]));
      w[1].next = &w[1] + 1;
      w[1].sg_list = &s[1];
      w[1].num_sge = 1;
      w[1].opcode = IBV_WR_SEND;

      clib_memset_u8 (&w[2], 0, sizeof (w[2]));
      w[2].next = &w[2] + 1;
      w[2].sg_list = &s[2];
      w[2].num_sge = 1;
      w[2].opcode = IBV_WR_SEND;

      clib_memset_u8 (&w[3], 0, sizeof (w[3]));
      w[3].next = &w[3] + 1;
      w[3].sg_list = &s[3];
      w[3].num_sge = 1;
      w[3].opcode = IBV_WR_SEND;

      s += 4;
      w += 4;
      b += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      clib_memset_u8 (&w[0], 0, sizeof (w[0]));
      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      s += 1;
      w += 1;
      b += 1;
      n -= 1;
    }

  w[-1].wr_id = txq->tail;	/* register item to free */
  w[-1].next = 0;		/* fix next pointer in WR linked-list */
  w[-1].send_flags = IBV_SEND_SIGNALED;	/* generate a CQE so we can free buffers */

  w = wr;
  if (PREDICT_FALSE (0 != ibv_post_send (txq->ibv_qp, w, &w)))
    {
      vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_SUBMISSION,
			n_left_from - (w - wr));
      n_left_from = w - wr;
    }

  return n_left_from;
}
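
/*
 * Editor's note (illustrative, not in the original file): on failure,
 * ibv_post_send() stores a pointer to the first work request it could not
 * post in its third (bad_wr) argument. Since w is reset to wr before the
 * call and reused as that output, w - wr is the number of WRs actually
 * accepted, so only the unposted remainder is counted against
 * RDMA_TX_ERROR_SUBMISSION.
 */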

/*
 * common tx/free functions
 */

static_always_inline void
rdma_device_output_free (vlib_main_t * vm, const vlib_node_runtime_t * node,
			 rdma_txq_t * txq, int is_mlx5dv)
{
  if (is_mlx5dv)
    rdma_device_output_free_mlx5 (vm, node, txq);
  else
    rdma_device_output_free_ibverb (vm, node, txq);
}

static_always_inline u32
rdma_device_output_tx_try (vlib_main_t * vm, const vlib_node_runtime_t * node,
			   const rdma_device_t * rd, rdma_txq_t * txq,
			   u32 n_left_from, u32 * bi, int is_mlx5dv)
{
  vlib_buffer_t *b[VLIB_FRAME_SIZE];
  const u32 mask = pow2_mask (txq->bufs_log2sz);

  /* do not enqueue more packets than ring space */
  n_left_from = clib_min (n_left_from, RDMA_TXQ_AVAIL_SZ (txq, txq->head,
							  txq->tail));
  /* if ring is full, do nothing */
  if (PREDICT_FALSE (n_left_from == 0))
    return 0;

  vlib_get_buffers (vm, bi, b, n_left_from);

  n_left_from = is_mlx5dv ?
    rdma_device_output_tx_mlx5 (vm, node, rd, txq, n_left_from, bi, b) :
    rdma_device_output_tx_ibverb (vm, node, rd, txq, n_left_from, bi, b);

  vlib_buffer_copy_indices_to_ring (txq->bufs, bi, txq->tail & mask,
				    RDMA_TXQ_BUF_SZ (txq), n_left_from);
  txq->tail += n_left_from;

  return n_left_from;
}

static_always_inline uword
rdma_device_output_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame, rdma_device_t * rd,
		       int is_mlx5dv)
{
  u32 thread_index = vm->thread_index;
  rdma_txq_t *txq =
    vec_elt_at_index (rd->txqs, thread_index % vec_len (rd->txqs));
  u32 *from;
  u32 n_left_from;
  int i;

  ASSERT (RDMA_TXQ_BUF_SZ (txq) >= VLIB_FRAME_SIZE);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  clib_spinlock_lock_if_init (&txq->lock);

  for (i = 0; i < RDMA_TX_RETRIES && n_left_from > 0; i++)
    {
      u32 n_enq;
      rdma_device_output_free (vm, node, txq, is_mlx5dv);
      n_enq = rdma_device_output_tx_try (vm, node, rd, txq, n_left_from, from,
					 is_mlx5dv);

      n_left_from -= n_enq;
      from += n_enq;
    }

  clib_spinlock_unlock_if_init (&txq->lock);

  if (PREDICT_FALSE (n_left_from))
    {
      vlib_buffer_free (vm, from, n_left_from);
      vlib_error_count (vm, node->node_index,
			RDMA_TX_ERROR_NO_FREE_SLOTS, n_left_from);
    }

  return frame->n_vectors - n_left_from;
}
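
/*
 * Editor's note (illustrative, not in the original file): each of the
 * RDMA_TX_RETRIES (5) rounds first reclaims completed slots via
 * rdma_device_output_free() and then enqueues whatever fits, so a full
 * ring can drain between attempts. For example, if a frame carries 256
 * packets and only 100 slots are free, the first round sends 100 and
 * later rounds send the rest as completions free more slots; anything
 * still left after the last round is dropped and counted as
 * RDMA_TX_ERROR_NO_FREE_SLOTS.
 */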

VNET_DEVICE_CLASS_TX_FN (rdma_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * frame)
{
  rdma_main_t *rm = &rdma_main;
  vnet_interface_output_runtime_t *ord = (void *) node->runtime_data;
  rdma_device_t *rd = pool_elt_at_index (rm->devices, ord->dev_instance);

  if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_MLX5DV))
    return rdma_device_output_tx (vm, node, frame, rd, 1 /* is_mlx5dv */ );

  return rdma_device_output_tx (vm, node, frame, rd, 0 /* is_mlx5dv */ );
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */