FD.io VPP  v19.08-27-gf4dcae4
Vector Packet Processing
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/udp/udp_packet.h>

#include <vmxnet3/vmxnet3.h>

#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
    VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

static __clib_unused char *vmxnet3_input_error_strings[] = {
#define _(n,s) s,
  foreach_vmxnet3_input_error
#undef _
};

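/*
 * Return the rx ring id (0 or 1) encoded in the completion descriptor:
 * rids in the second num_rx_queues-sized range map to ring 1, the rest
 * to ring 0.
 */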
static_always_inline u16
vmxnet3_find_rid (vmxnet3_device_t * vd, vmxnet3_rx_comp * rx_comp)
{
  u32 rid;

  // rid is bits 16-25 (10-bit number)
  rid = rx_comp->index & (0xffffffff >> 6);
  rid >>= 16;
  if ((rid >= vd->num_rx_queues) && (rid < (vd->num_rx_queues << 1)))
    return 1;
  else
    return 0;
}

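/*
 * Advance the completion ring cursor; on wrap-around, toggle the
 * generation bit so descriptors left over from the previous pass are
 * not treated as new completions.
 */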
static_always_inline void
vmxnet3_rx_comp_ring_advance_next (vmxnet3_rxq_t * rxq)
{
  vmxnet3_rx_comp_ring *comp_ring = &rxq->rx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == rxq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_RXCF_GEN;
    }
}

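/*
 * Record l2/l3/l4 header offsets for IP4/IP6 packets, request checksum
 * offload when the device has not validated the IP or TCP/UDP checksum,
 * and fill in GSO metadata when an LRO completion reported an MSS.
 */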
static_always_inline void
vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
                        u16 gso_size)
{
  u8 l4_hdr_sz = 0;

  if (rx_comp->flags & VMXNET3_RXCF_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
                                            sizeof (ethernet_header_t));

      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        ip4_header_bytes (ip4);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
            {
              hb->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
              ip4->checksum = 0;
            }
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
    {
      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        sizeof (ip6_header_t);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
}

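/*
 * Per-queue receive path: drain the rx completion ring, rebuild buffer
 * chains, hand the packets to the next nodes and refill both rx rings.
 */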
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, vmxnet3_device_t * vd,
                             u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  vmxnet3_rx_comp *rx_comp;
  u32 desc_idx;
  vmxnet3_rxq_t *rxq;
  u32 thread_index = vm->thread_index;
  u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vmxnet3_rx_ring *ring;
  vmxnet3_rx_comp_ring *comp_ring;
  u16 rid;
  vlib_buffer_t *prev_b0 = 0, *hb = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u8 known_next = 0, got_packet = 0;
  vmxnet3_rx_desc *rxd;
  clib_error_t *error;
  u16 gso_size = 0;

  rxq = vec_elt_at_index (vd->rxqs, qid);
  comp_ring = &rxq->rx_comp_ring;
  bi = buffer_indices;
  next = nexts;
  rx_comp = &rxq->rx_comp[comp_ring->next];

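  /*
   * Process completions that carry the current generation bit,
   * assembling SOP/EOP segments into buffer chains, up to one full
   * frame (VLIB_FRAME_SIZE packets) per call.
   */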
  while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
                       (comp_ring->gen ==
                        (rx_comp->flags & VMXNET3_RXCF_GEN))))
    {
      vlib_buffer_t *b0;
      u32 bi0;

      rid = vmxnet3_find_rid (vd, rx_comp);
      ring = &rxq->rx_ring[rid];

      if (PREDICT_TRUE (ring->fill >= 1))
        ring->fill--;
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
          if (hb)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          break;
        }

      desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
      ring->consume = desc_idx;
      rxd = &rxq->rx_desc[rid][desc_idx];

      bi0 = ring->bufs[desc_idx];
      ring->bufs[desc_idx] = ~0;

      b0 = vlib_get_buffer (vm, bi0);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = vd->sw_if_index;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vnet_buffer (b0)->feature_arc_index = 0;
      b0->current_length = rx_comp->len & VMXNET3_RXCL_LEN_MASK;
      b0->current_data = 0;
      b0->total_length_not_including_first_buffer = 0;
      b0->next_buffer = 0;
      b0->flags = 0;
      b0->error = 0;
      b0->current_config_index = 0;
      ASSERT (b0->current_length != 0);

      if (PREDICT_FALSE ((rx_comp->index & VMXNET3_RXCI_EOP) &&
                         (rx_comp->len & VMXNET3_RXCL_ERROR)))
        {
          vlib_buffer_free_one (vm, bi0);
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          goto next;
        }

      if (rx_comp->index & VMXNET3_RXCI_SOP)
        {
          ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
          /* start segment */
          if ((vd->lro_enable) &&
              (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
            {
              vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

              gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
            }

          hb = b0;
          bi[0] = bi0;
          if (!(rx_comp->index & VMXNET3_RXCI_EOP))
            {
              hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
              prev_b0 = b0;
            }
          else
            {
              /*
               * Both start and end of packet are set. It is a complete packet
               */
              prev_b0 = 0;
              got_packet = 1;
            }
        }
      else if (rx_comp->index & VMXNET3_RXCI_EOP)
        {
          /* end of segment */
          if (prev_b0)
            {
              prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
              prev_b0->next_buffer = bi0;
              hb->total_length_not_including_first_buffer +=
                b0->current_length;
              prev_b0 = 0;
              got_packet = 1;
            }
          else
            {
              /* EOP without SOP, error */
              vlib_error_count (vm, node->node_index,
                                VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);
              vlib_buffer_free_one (vm, bi0);
              if (hb && vlib_get_buffer_index (vm, hb) != bi0)
                {
                  vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
                  hb = 0;
                }
              goto next;
            }
        }
      else if (prev_b0)		// !sop && !eop
        {
          /* mid chain */
          ASSERT (rxd->flags & VMXNET3_RXF_BTYPE);
          prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
          prev_b0->next_buffer = bi0;
          prev_b0 = b0;
          hb->total_length_not_including_first_buffer += b0->current_length;
        }
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET, 1);
          vlib_buffer_free_one (vm, bi0);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          goto next;
        }

      n_rx_bytes += b0->current_length;

      if (got_packet)
        {
          if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
            {
              next_index = vd->per_interface_next_index;
              known_next = 1;
            }

          if (PREDICT_FALSE
              (vnet_device_input_have_features (vd->sw_if_index)))
            {
              vnet_feature_start_device_input_x1 (vd->sw_if_index,
                                                  &next_index, hb);
              known_next = 1;
            }

          if (PREDICT_FALSE (known_next))
            next[0] = next_index;
          else
            {
              ethernet_header_t *e = (ethernet_header_t *) hb->data;

              next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
              if (!ethernet_frame_is_tagged (e->type))
                vmxnet3_handle_offload (rx_comp, hb, gso_size);
            }

          n_rx_packets++;
          next++;
          bi++;
          hb = 0;
          got_packet = 0;
          gso_size = 0;
        }

    next:
      vmxnet3_rx_comp_ring_advance_next (rxq);
      rx_comp = &rxq->rx_comp[comp_ring->next];
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;

      bi = buffer_indices;
      next = nexts;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b;
          vmxnet3_input_trace_t *tr;

          b = vlib_get_buffer (vm, bi[0]);
          vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
          tr = vlib_add_trace (vm, node, b, sizeof (*tr));
          tr->next_index = next[0];
          tr->hw_if_index = vd->hw_if_index;
          tr->buffer = *b;

          n_trace--;
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (n_rx_packets))
    {
      vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts,
                                   n_rx_packets);
      vlib_increment_combined_counter
        (vnm->interface_main.combined_sw_if_counters +
         VNET_INTERFACE_COUNTER_RX, thread_index,
         vd->sw_if_index, n_rx_packets, n_rx_bytes);
    }

  error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }
  error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }

  return n_rx_packets;
}

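/*
 * Input node function: poll every vmxnet3 device/queue assigned to this
 * thread, skipping interfaces that are not admin-up.
 */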
VLIB_NODE_FN (vmxnet3_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    vmxnet3_device_t *vd;
    vd = vec_elt_at_index (vmxm->devices, dq->dev_instance);
    if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, dq->queue_id);
  }
  return n_rx;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */