FD.io VPP v17.04-9-g99c0734 (Vector Packet Processing)
node.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>

#define foreach_memif_input_error

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};
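
/*
 * foreach_memif_input_error is an x-macro list of (name, string) error
 * counters. It is empty in this version, so the enum above contains
 * only MEMIF_INPUT_N_ERROR. A hypothetical entry such as
 *   _(BUFFER_ALLOC, "buffer allocation failed")
 * would expand to MEMIF_INPUT_ERROR_BUFFER_ALLOC in the enum and to
 * "buffer allocation failed" in memif_input_error_strings.
 */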

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  uword indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
              t->ring);
  return s;
}
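
/*
 * Rendered trace output looks roughly like (values illustrative):
 *   memif: hw_if_index 1 next-index 4
 *     slot: ring 0
 */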

static_always_inline void
memif_prefetch (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, STORE);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
}

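/*
 * Copy packets from one ring of a memif interface into freshly
 * allocated VLIB buffers, enqueue them to the next graph node, and
 * return the number of packets received.
 */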
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  vnet_main_t *vnm = vnet_get_main ();
  u8 rid = 0;			/* Ring id */
  memif_ring_t *ring = memif_get_ring (mif, type, rid);
  memif_ring_data_t *rd =
    vec_elt_at_index (mif->ring_data, rid + type * mif->num_s2m_rings);
  u16 head;

  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  uword n_trace = vlib_get_trace_count (vm, node);
  memif_main_t *nm = &memif_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  u32 cpu_index = os_get_cpu_number ();
  u32 bi0, bi1;
  vlib_buffer_t *b0, *b1;
  u16 ring_size = 1 << mif->log2_ring_size;
  u16 mask = ring_size - 1;
  u16 num_slots;
  void *mb0, *mb1;

  if (mif->per_interface_next_index != ~0)
    next_index = mif->per_interface_next_index;

  n_free_bufs = vec_len (nm->rx_buffers[cpu_index]);
  if (PREDICT_FALSE (n_free_bufs < ring_size))
    {
      vec_validate (nm->rx_buffers[cpu_index], ring_size + n_free_bufs - 1);
      n_free_bufs +=
        vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs],
                           ring_size);
      _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs;
    }
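  /* The per-thread rx_buffers vector was topped up to roughly a full
     ring of free buffer indices, so the copy loops below can take
     buffers without allocating mid-burst. */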

  head = ring->head;
  if (head == rd->last_head)
    return 0;

  if (head > rd->last_head)
    num_slots = head - rd->last_head;
  else
    num_slots = ring_size - rd->last_head + head;

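  /* num_slots counts the descriptors the peer has produced since the
     last pass; the else branch covers head wrapping past the end of
     the power-of-2 ring. */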
  while (num_slots)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;
      u32 next1 = next_index;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

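      /* Dual-loop: consume two descriptors per iteration while at
         least six slots remain (two processed now, four prefetched
         ahead) and the frame has room for both packets. */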
      while (num_slots > 5 && n_left_to_next > 2)
        {
          if (PREDICT_TRUE (rd->last_head + 5 < ring_size))
            {
              CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 2),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (memif_get_buffer (mif, ring, rd->last_head + 3),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[rd->last_head + 4],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[rd->last_head + 5],
                             CLIB_CACHE_LINE_BYTES, LOAD);
            }
          else
            {
              CLIB_PREFETCH (memif_get_buffer
                             (mif, ring, (rd->last_head + 2) & mask),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (memif_get_buffer
                             (mif, ring, (rd->last_head + 3) & mask),
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[(rd->last_head + 4) & mask],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (&ring->desc[(rd->last_head + 5) & mask],
                             CLIB_CACHE_LINE_BYTES, LOAD);
            }
          /* get empty buffer */
          u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1;
          bi0 = nm->rx_buffers[cpu_index][last_buf];
          bi1 = nm->rx_buffers[cpu_index][last_buf - 1];
          _vec_len (nm->rx_buffers[cpu_index]) -= 2;

          if (last_buf > 4)
            {
              memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 2]);
              memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 3]);
            }

          /* enqueue buffer */
          to_next[0] = bi0;
          to_next[1] = bi1;
          to_next += 2;
          n_left_to_next -= 2;

177 
178  /* fill buffer metadata */
179  b0 = vlib_get_buffer (vm, bi0);
180  b1 = vlib_get_buffer (vm, bi1);
181 
182  vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
183  vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
184 
185  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
186  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
187 
188  /* copy buffer */
189  mb0 = memif_get_buffer (mif, ring, rd->last_head);
192  b0->current_length = ring->desc[rd->last_head].length;
193  rd->last_head = (rd->last_head + 1) & mask;
194 
195  mb1 = memif_get_buffer (mif, ring, rd->last_head);
198  b1->current_length = ring->desc[rd->last_head].length;
199  rd->last_head = (rd->last_head + 1) & mask;
200 
203  mb0 + CLIB_CACHE_LINE_BYTES,
205 
208  mb1 + CLIB_CACHE_LINE_BYTES,
210 
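          /* Only the first cache line of each packet was copied
             eagerly above; the conditional copies bring in the rest,
             so minimum-size packets cost a single cache-line copy. */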
          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

          if (PREDICT_FALSE (n_trace > 0))
            {
              /* b0 */
              memif_input_trace_t *tr;
              vlib_trace_buffer (vm, node, next0, b0,
                                 /* follow_chain */ 0);
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = rid;

              if (n_trace)
                {
                  /* b1 */
                  memif_input_trace_t *tr;
                  vlib_trace_buffer (vm, node, next1, b1,
                                     /* follow_chain */ 0);
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
                  tr->next_index = next1;
                  tr->hw_if_index = mif->hw_if_index;
                  tr->ring = rid;
                }
            }

          /* redirect if feature path enabled */
          vnet_feature_start_device_input_x2 (mif->sw_if_index,
                                              &next0, &next1, b0, b1);

          /* enqueue */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);
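          /* The x2 enqueue speculates that both packets follow
             next_index; it patches the frame if the feature arc
             redirected next0 or next1 elsewhere. */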

          /* next packet */
          num_slots -= 2;
          n_rx_packets += 2;
          n_rx_bytes += b0->current_length;
          n_rx_bytes += b1->current_length;
        }
      while (num_slots && n_left_to_next)
        {
          /* get empty buffer */
          u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1;
          bi0 = nm->rx_buffers[cpu_index][last_buf];
          _vec_len (nm->rx_buffers[cpu_index]) = last_buf;

          /* enqueue buffer */
          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next--;

          /* fill buffer metadata */
          b0 = vlib_get_buffer (vm, bi0);
          b0->current_length = ring->desc[rd->last_head].length;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* copy buffer */
          mb0 = memif_get_buffer (mif, ring, rd->last_head);
          clib_memcpy (vlib_buffer_get_current (b0), mb0,
                       CLIB_CACHE_LINE_BYTES);
          if (b0->current_length > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         mb0 + CLIB_CACHE_LINE_BYTES,
                         b0->current_length - CLIB_CACHE_LINE_BYTES);

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

          if (PREDICT_FALSE (n_trace > 0))
            {
              memif_input_trace_t *tr;
              vlib_trace_buffer (vm, node, next0, b0,
                                 /* follow_chain */ 0);
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = rid;
            }

          /* redirect if feature path enabled */
          vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0, b0);

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

          /* next packet */
          rd->last_head = (rd->last_head + 1) & mask;
          num_slots--;
          n_rx_packets++;
          n_rx_bytes += b0->current_length;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* make copied packet data globally visible before the tail update
     releases the consumed slots back to the producer */
  CLIB_MEMORY_STORE_BARRIER ();
  ring->tail = head;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, cpu_index,
                                   mif->hw_if_index, n_rx_packets,
                                   n_rx_bytes);

  return n_rx_packets;
}

static uword
memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
  u32 n_rx_packets = 0;
  u32 cpu_index = os_get_cpu_number ();
  memif_main_t *nm = &memif_main;
  memif_if_t *mif;

  /* *INDENT-OFF* */
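  /* Poll every admin-up, connected interface owned by this thread;
     interfaces are spread statically across input threads by
     if_index modulo the input thread count. */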
  pool_foreach (mif, nm->interfaces,
    ({
      if (mif->flags & MEMIF_IF_FLAG_ADMIN_UP &&
          mif->flags & MEMIF_IF_FLAG_CONNECTED &&
          (mif->if_index % nm->input_cpu_count) ==
          (cpu_index - nm->input_cpu_first_index))
        {
          if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            n_rx_packets +=
              memif_device_input_inline (vm, node, frame, mif,
                                         MEMIF_RING_M2S);
          else
            n_rx_packets +=
              memif_device_input_inline (vm, node, frame, mif,
                                         MEMIF_RING_S2M);
        }
    }));
  /* *INDENT-ON* */

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .function = memif_input_fn,
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};

VLIB_NODE_FUNCTION_MULTIARCH (memif_input_node, memif_input_fn)
/* *INDENT-ON* */

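/*
 * "sibling_of" makes memif-input share device-input's next-node
 * indices, and the node starts in interrupt state.
 * VLIB_NODE_FUNCTION_MULTIARCH builds CPU-variant clones of
 * memif_input_fn and selects the best match at runtime.
 */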
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */