FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
vhost_user_input.c
/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * the RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls the number of copy operations that are stacked
 * before the copy is done for all of them and the descriptors are
 * given back to the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")  \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(NOT_READY, "vhost interface not ready or down") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

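/*
 * Record trace metadata for one packet taken from the avail ring:
 * the descriptor layout (single, chained or indirect) and a copy of
 * the virtio-net header, so that "show trace" can display them.
 */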
static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

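/*
 * Execute the copy operations queued in cpy[]. Guest addresses are
 * translated with map_guest_mem(); sources are mapped two entries
 * ahead and prefetched so the memcpy works on warm cache lines.
 * Returns 0 on success, 1 if a guest address could not be mapped.
 */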
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  CLIB_PREFETCH (src2, 64, LOAD);
	  CLIB_PREFETCH (src3, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

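/*
 * Parse the virtio-net header of a received packet and translate its
 * checksum/GSO requests into vlib buffer offload flags: L2/L3/L4
 * header offsets are computed from the packet itself, the L4 checksum
 * field is cleared so it can be recomputed downstream, and gso_size
 * is propagated when the guest requested segmentation.
 */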
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
			      virtio_net_hdr_t * hdr)
{
  u8 l4_hdr_sz = 0;
  u8 l4_proto = 0;
  ethernet_header_t *eh = (ethernet_header_t *) b0_data;
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);

  if (ethernet_frame_is_tagged (ethertype))
    {
      ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  vlan++;
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  vnet_buffer (b0)->l2_hdr_offset = 0;
  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
  vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }
  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *)
	(b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = tcp_header_bytes (tcp);
      tcp->checksum = 0;
      b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      udp_header_t *udp =
	(udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = sizeof (*udp);
      udp->checksum = 0;
      b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }
}

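/*
 * Interrupt coalescing: if packets have been queued on a ring since
 * the last guest notification and the coalescing deadline has passed,
 * signal the guest (via the ring's call fd) now.
 */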
static_always_inline void
vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_vring_t * txvq,
			       vhost_user_vring_t * rxvq)
{
  f64 now = vlib_time_now (vm);

  if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
    vhost_user_send_call (vm, txvq);

  if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
    vhost_user_send_call (vm, rxvq);
}

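/*
 * Open the next frame for this input node. If a device-input feature
 * arc is enabled on the interface, resolve the first feature node and
 * its config index; otherwise packets go straight to ethernet-input,
 * in which case the frame is flagged as single-interface so that
 * ethernet-input can take its fast path.
 */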
static_always_inline void
vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vhost_user_intf_t * vui,
			      u32 * current_config_index, u32 * next_index,
			      u32 ** to_next, u32 * n_left_to_next)
{
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				       vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, current_config_index,
			    next_index, 0);
    }

  vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);

  if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
}

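/*
 * Input routine for the split (legacy) virtio ring. The guest TX ring
 * is VPP's RX side: descriptors are drained from the avail ring,
 * copied into vlib buffers via the deferred copy array, and returned
 * on the used ring. Returns the number of packets received.
 */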
static_always_inline u32
vhost_user_if_input (vlib_main_t * vm,
		     vhost_user_main_t * vum,
		     vhost_user_intf_t * vui,
		     u16 qid, vlib_node_runtime_t * node,
		     vnet_hw_interface_rx_mode mode, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    vhost_user_input_do_interrupt (vm, txvq, rxvq);
  }

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue as for big packets,
   * the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the queue
	   * and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do that now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE (n_trace))
	{
	  vlib_trace_buffer (vm, node, next_index, b_head,
			     /* follow_chain */ 0);
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      desc_data_offset = vui->virtio_net_hdr_sz;

      if (enable_csum)
	{
	  virtio_net_hdr_mrg_rxbuf_t *hdr;
	  u8 *b_data;
	  u16 current;

	  hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
	  if (PREDICT_FALSE (hdr == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
	    {
	      if ((desc_data_offset == desc_table[desc_current].len) &&
		  (desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT))
		{
		  current = desc_table[desc_current].next;
		  b_data = map_guest_mem (vui, desc_table[current].addr,
					  &map_hint);
		  if (PREDICT_FALSE (b_data == 0))
		    {
		      vlib_error_count (vm, node->node_index,
					VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
					1);
		      goto out;
		    }
		}
	      else
		b_data = (u8 *) hdr + desc_data_offset;

	      vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	    }
	}

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VIRTQ_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		{
		  goto out;
		}
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Check whether any buffers are left.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  n_left = 0;
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }

stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

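/*
 * Packed-ring equivalent of writing back the used ring: each consumed
 * descriptor is flagged as used by toggling VIRTQ_DESC_F_AVAIL and
 * VIRTQ_DESC_F_USED to match the current used wrap counter.
 */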
static_always_inline void
vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq, u16 desc_head,
			       u16 n_descs_processed)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u16 desc_idx;
  u16 mask = txvq->qsz_mask;

  for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
    {
      if (txvq->used_wrap_counter)
	desc_table[(desc_head + desc_idx) & mask].flags |=
	  (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      else
	desc_table[(desc_head + desc_idx) & mask].flags &=
	  ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      vhost_user_advance_last_used_idx (txvq);
    }
}

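/*
 * Packed-ring variant of vhost_user_rx_trace: record the descriptor
 * layout and virtio-net header of the packet at desc_current.
 */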
static_always_inline void
vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vhost_user_vring_t * txvq,
			    u16 desc_current)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vring_packed_desc_t *hdr_desc;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->packed_desc[desc_current];
  if (txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      clib_memcpy_fast (&t->hdr, hdr,
			len > hdr_desc->len ? hdr_desc->len : len);
    }
}

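/*
 * Packed-ring variant of vhost_user_rx_discard_packet: drop up to
 * discard_max packets by consuming their descriptors without copying
 * the data, then mark them as used in one batch.
 */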
static_always_inline u32
vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
				     vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 discard_max)
{
  u32 discarded_packets = 0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head;

  desc_head = desc_current = txvq->last_used_idx & mask;

  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  while ((discarded_packets != discard_max) &&
	 vhost_user_packed_desc_available (txvq, desc_current))
    {
      vhost_user_advance_last_avail_idx (txvq);
      discarded_packets++;
      desc_current = (desc_current + 1) & mask;
    }

  if (PREDICT_TRUE (discarded_packets))
    vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
  return (discarded_packets);
}

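/*
 * Copy executor for the packed ring. Same idea as
 * vhost_user_input_copy but with a deeper pipeline: four sources are
 * mapped and prefetched while the previous four are copied. Returns
 * VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR, or MMAP_FAIL if a guest
 * address could not be mapped.
 */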
static_always_inline u32
vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
			      u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
  u8 bad;
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;

  if (PREDICT_TRUE (copy_len >= 8))
    {
      src4 = map_guest_mem (vui, cpy[0].src, map_hint);
      src5 = map_guest_mem (vui, cpy[1].src, map_hint);
      src6 = map_guest_mem (vui, cpy[2].src, map_hint);
      src7 = map_guest_mem (vui, cpy[3].src, map_hint);
      bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
      if (PREDICT_FALSE (bad))
	goto one_by_one;
      CLIB_PREFETCH (src4, 64, LOAD);
      CLIB_PREFETCH (src5, 64, LOAD);
      CLIB_PREFETCH (src6, 64, LOAD);
      CLIB_PREFETCH (src7, 64, LOAD);

      while (PREDICT_TRUE (copy_len >= 8))
	{
	  src0 = src4;
	  src1 = src5;
	  src2 = src6;
	  src3 = src7;

	  src4 = map_guest_mem (vui, cpy[4].src, map_hint);
	  src5 = map_guest_mem (vui, cpy[5].src, map_hint);
	  src6 = map_guest_mem (vui, cpy[6].src, map_hint);
	  src7 = map_guest_mem (vui, cpy[7].src, map_hint);
	  bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
	  if (PREDICT_FALSE (bad))
	    break;

	  CLIB_PREFETCH (src4, 64, LOAD);
	  CLIB_PREFETCH (src5, 64, LOAD);
	  CLIB_PREFETCH (src6, 64, LOAD);
	  CLIB_PREFETCH (src7, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
	  clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
	  copy_len -= 4;
	  cpy += 4;
	}
    }

one_by_one:
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	{
	  rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  break;
	}
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return rc;
}

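/*
 * Packed-ring checksum/GSO handling: locate the packet data that
 * follows the virtio-net header (possibly in the next descriptor)
 * and hand it to vhost_user_handle_rx_offload.
 */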
static_always_inline u32
vhost_user_do_offload (vhost_user_intf_t * vui,
		       vring_packed_desc_t * desc_table, u16 desc_current,
		       u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
{
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u8 *b_data;
  u32 desc_data_offset = vui->virtio_net_hdr_sz;

  hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
  if (PREDICT_FALSE (hdr == 0))
    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
  else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  desc_current = (desc_current + 1) & mask;
	  b_data =
	    map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
	  if (PREDICT_FALSE (b_data == 0))
	    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  else
	    vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
      else
	{
	  b_data = (u8 *) hdr + desc_data_offset;
	  vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
    }

  return rc;
}

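/*
 * Number of vlib buffers needed to hold desc_len bytes, i.e.
 * ceil(desc_len / buffer_data_size). The common 2048-byte buffer size
 * is handled with shift/mask instead of a division; e.g. a 9014-byte
 * jumbo frame with 2048-byte buffers yields (9014 >> 11) + 1 = 5.
 */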
static_always_inline u32
vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
{
  div_t result;
  u32 buffers_required;

  if (PREDICT_TRUE (buffer_data_size == 2048))
    {
      buffers_required = desc_len >> 11;
      if ((desc_len & 2047) != 0)
	buffers_required++;
      return (buffers_required);
    }

  if (desc_len < buffer_data_size)
    return 1;

  result = div (desc_len, buffer_data_size);
  if (result.rem)
    buffers_required = result.quot + 1;
  else
    buffers_required = result.quot;

  return (buffers_required);
}

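/*
 * Sum the data length of an indirect descriptor table (the entry
 * count is len / sizeof (vring_packed_desc_t), i.e. len >> 4),
 * subtract the virtio-net header, and convert to a buffer count.
 * Returns 0 if the indirect table cannot be mapped.
 */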
static_always_inline u32
vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
				      vhost_user_vring_t * txvq,
				      u32 buffer_data_size, u16 desc_current,
				      u32 * map_hint)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 desc_data_offset = vui->virtio_net_hdr_sz;
  u16 desc_idx = desc_current;
  u32 n_descs;

  n_descs = desc_table[desc_idx].len >> 4;
  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
  if (PREDICT_FALSE (desc_table == 0))
    return 0;

  for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
    desc_len += desc_table[desc_idx].len;

  if (PREDICT_TRUE (desc_len > desc_data_offset))
    desc_len -= desc_data_offset;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

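/*
 * Walk a chain of packed descriptors linked by VIRTQ_DESC_F_NEXT,
 * advancing *current, the ring's last_avail index and the *n_left
 * descriptor count, and return the number of buffers needed for the
 * chain's payload.
 */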
static_always_inline u32
vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 buffer_data_size, u16 * current,
				     u16 * n_left)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 mask = txvq->qsz_mask;

  while (desc_table[*current].flags & VIRTQ_DESC_F_NEXT)
    {
      desc_len += desc_table[*current].len;
      (*n_left)++;
      *current = (*current + 1) & mask;
      vhost_user_advance_last_avail_idx (txvq);
    }
  desc_len += desc_table[*current].len;
  (*n_left)++;
  *current = (*current + 1) & mask;
  vhost_user_advance_last_avail_idx (txvq);

  if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
    desc_len -= vui->virtio_net_hdr_sz;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

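/*
 * Gather the payload of one descriptor into the vlib buffer chain:
 * chain a fresh buffer whenever the current one is full and queue one
 * copy order per contiguous region. Buffers come pre-allocated via
 * the *next / *b cursors; only the copy orders are deferred.
 */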
static_always_inline void
vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
			    u16 * desc_idx, vlib_buffer_t * b_head,
			    vlib_buffer_t ** b_current, u32 ** next,
			    vlib_buffer_t *** b, u32 * bi_current,
			    vhost_cpu_t * cpu, u16 * copy_len,
			    u32 * buffers_used, u32 buffers_required,
			    u32 * desc_data_offset, u32 buffer_data_size,
			    u16 mask)
{
  u32 desc_data_l;

  while (*desc_data_offset < desc_table[*desc_idx].len)
    {
      /* Get more output if necessary. Or end of packet. */
      if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
	{
	  /* Get next output */
	  u32 bi_next = **next;
	  (*next)++;
	  (*b_current)->next_buffer = bi_next;
	  (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  *bi_current = bi_next;
	  *b_current = **b;
	  (*b)++;
	  (*buffers_used)++;
	  ASSERT (*buffers_used <= buffers_required);
	}

      /* Prepare a copy order executed later for the data */
      ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[*copy_len];
      (*copy_len)++;
      desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
      cpy->len = buffer_data_size - (*b_current)->current_length;
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
			  (*b_current)->current_length);
      cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;

      *desc_data_offset += cpy->len;

      (*b_current)->current_length += cpy->len;
      b_head->total_length_not_including_first_buffer += cpy->len;
    }
  *desc_idx = (*desc_idx + 1) & mask;
  *desc_data_offset = 0;
}

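/*
 * Input routine for the packed virtio ring. Unlike the split-ring
 * path, the number of packets and buffers is computed up front so the
 * exact number of vlib buffers can be allocated in one call; packets
 * are then assembled, traced, and their descriptors marked consumed
 * in one batch. Returns the number of packets received.
 */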
static_always_inline u32
vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
			    vhost_user_intf_t * vui, u16 qid,
			    vlib_node_runtime_t * node,
			    vnet_hw_interface_rx_mode mode, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left = 0;
  u32 buffers_required = 0;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u32 current_config_index = ~0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head, last_used_idx;
  vring_packed_desc_t *desc_table = 0;
  u32 n_descs_processed = 0;
  u32 rv;
  vlib_buffer_t **b;
  u32 *next;
  u32 buffers_used = 0;
  u16 current, n_descs_to_process;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->packed_desc == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, txvq, rxvq);

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used_event->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used_event->flags = VRING_EVENT_F_DISABLE;
    }

  last_used_idx = txvq->last_used_idx & mask;
  desc_head = desc_current = last_used_idx;

  if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
						VHOST_USER_DOWN_DISCARD_COUNT);
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
      goto done;
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  /*
   * Compute n_left and total buffers needed
   */
  desc_table = txvq->packed_desc;
  current = desc_current;
  while (vhost_user_packed_desc_available (txvq, current) &&
	 (n_left < VLIB_FRAME_SIZE))
    {
      if (desc_table[current].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  buffers_required +=
	    vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
						  current, &map_hint);
	  n_left++;
	  current = (current + 1) & mask;
	  vhost_user_advance_last_avail_idx (txvq);
	}
      else
	{
	  buffers_required +=
	    vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
						 &current, &n_left);
	}
    }

  /* Something is broken if we need more than 10000 buffers */
  if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
    {
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  vec_validate (cpu->to_next_list, buffers_required);
  rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
  if (PREDICT_FALSE (rv != buffers_required))
    {
      vlib_buffer_free (vm, cpu->to_next_list, rv);
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  next = cpu->to_next_list;
  vec_validate (cpu->rx_buffers_pdesc, buffers_required);
  vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
  b = cpu->rx_buffers_pdesc;
  n_descs_processed = n_left;

  while (n_left)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u32 desc_data_offset;
      u16 desc_idx = desc_current;
      u32 n_descs;

      desc_table = txvq->packed_desc;
      to_next[0] = bi_current = next[0];
      b_head = b_current = b[0];
      b++;
      buffers_used++;
      ASSERT (buffers_used <= buffers_required);
      to_next++;
      next++;
      n_left_to_next--;

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      desc_data_offset = vui->virtio_net_hdr_sz;
      n_descs_to_process = 1;

      if (desc_table[desc_idx].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  n_descs = desc_table[desc_idx].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
				      &map_hint);
	  desc_idx = 0;
	  if (PREDICT_FALSE (desc_table == 0) ||
	      (enable_csum &&
	       (PREDICT_FALSE
		(vhost_user_do_offload
		 (vui, desc_table, desc_idx, mask, b_head,
		  &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      to_next--;
	      next--;
	      n_left_to_next++;
	      buffers_used--;
	      b--;
	      goto out;
	    }
	  while (n_descs)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs--;
	    }
	}
      else
	{
	  if (enable_csum)
	    {
	      rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
					  b_head, &map_hint);
	      if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
		{
		  vlib_error_count (vm, node->node_index, rv, 1);
		  to_next--;
		  next--;
		  n_left_to_next++;
		  buffers_used--;
		  b--;
		  goto out;
		}
	    }
	  /*
	   * For chained descriptors, we process the whole chain in this
	   * while loop and count how many descriptors are in the chain.
	   */
	  n_descs_to_process = 1;
	  while (desc_table[desc_idx].flags & VIRTQ_DESC_F_NEXT)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs_to_process++;
	    }
	  vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
				      &b_current, &next, &b, &bi_current,
				      cpu, &copy_len, &buffers_used,
				      buffers_required, &desc_data_offset,
				      buffer_data_size, mask);
	}

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
      b_head->error = 0;

      if (current_config_index != ~0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

    out:
      ASSERT (n_left >= n_descs_to_process);
      n_left -= n_descs_to_process;

      /* advance to next descriptor */
      desc_current = (desc_current + n_descs_to_process) & mask;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
					     &map_hint);
	  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
	    vlib_error_count (vm, node->node_index, rv, 1);
	  copy_len = 0;
	}
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Do the memory copies */
  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
    vlib_error_count (vm, node->node_index, rv, 1);

  /* Must do the tracing before giving buffers back to driver */
  if (PREDICT_FALSE (n_trace))
    {
      u32 left = n_rx_packets;

      b = cpu->rx_buffers_pdesc;
      while (n_trace && left)
	{
	  vhost_trace_t *t0;

	  vlib_trace_buffer (vm, node, next_index, b[0],
			     /* follow_chain */ 0);
	  t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	  b++;
	  vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
	  last_used_idx = (last_used_idx + 1) & mask;
	  n_trace--;
	  left--;
	  vlib_set_trace_count (vm, node, n_trace);
	}
    }

  /*
   * Give buffers back to driver.
   */
  vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      txvq->n_since_last_int += n_rx_packets;
      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

  if (PREDICT_FALSE (buffers_used < buffers_required))
    vlib_buffer_free (vm, next, buffers_required - buffers_used);

done:
  return n_rx_packets;
}

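/*
 * Node dispatch function: for each interface/queue assigned to this
 * worker, run the packed- or split-ring input routine. The
 * checksum-offload variant is selected once per call with a constant
 * enable_csum argument, which lets the compiler specialize the
 * always-inline input routines and drop the dead branch.
 */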
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_device_input_runtime_t *rt =
    (vnet_device_input_runtime_t *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  vec_foreach (dq, rt->devices_and_queues)
  {
    if ((node->state == VLIB_NODE_STATE_POLLING) ||
	clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
      {
	vui =
	  pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
	if (vhost_user_is_packed_ring_supported (vui))
	  {
	    if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
	      n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
							  dq->queue_id, node,
							  dq->mode, 1);
	    else
	      n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
							  dq->queue_id, node,
							  dq->mode, 0);
	  }
	else
	  {
	    if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
	      n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
						   node, dq->mode, 1);
	    else
	      n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
						   node, dq->mode, 0);
	  }
      }
  }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */