FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
ip4_sv_reass.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv4 Shallow Virtual Reassembly.
19  *
20  * This file contains the source code for IPv4 Shallow Virtual reassembly.
21  */
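/*
 * Shallow virtual reassembly does not reassemble the payload. Instead, the
 * first fragment of a datagram supplies the L4 information (protocol, ports,
 * TCP flags/seq/ack or ICMP type), which is then copied into the vnet_buffer
 * reassembly metadata of every fragment of that datagram. Fragments arriving
 * before the first fragment are cached and released once the first fragment
 * has been seen; non-fragmented packets pass through untouched.
 */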
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vnet/ip/ip4_to_ip6.h>
27 #include <vppinfra/fifo.h>
28 #include <vppinfra/bihash_16_8.h>
30 
31 #define MSEC_PER_SEC 1000
32 #define IP4_SV_REASS_TIMEOUT_DEFAULT_MS 100
33 #define IP4_SV_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
34 #define IP4_SV_REASS_MAX_REASSEMBLIES_DEFAULT 1024
35 #define IP4_SV_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
36 #define IP4_SV_REASS_HT_LOAD_FACTOR (0.75)
37 
38 typedef enum
39 {
40  IP4_SV_REASS_RC_OK,
41  IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS,
42  IP4_SV_REASS_RC_UNSUPP_IP_PROTO,
43 } ip4_sv_reass_rc_t;
44 
45 typedef struct
46 {
47  union
48  {
49  struct
50  {
51  u32 xx_id;
52  ip4_address_t src;
53  ip4_address_t dst;
54  u16 frag_id;
55  u8 proto;
56  u8 unused;
57  };
58  u64 as_u64[2];
59  };
60 } ip4_sv_reass_key_t;
61 
62 typedef union
63 {
64  struct
65  {
66  u32 reass_index;
67  u32 thread_index;
68  };
69  u64 as_u64;
70 } ip4_sv_reass_val_t;
71 
72 typedef union
73 {
74  struct
75  {
76  ip4_sv_reass_key_t k;
77  ip4_sv_reass_val_t v;
78  };
79  clib_bihash_kv_16_8_t kv;
80 } ip4_sv_reass_kv_t;
81 
82 typedef struct
83 {
84  // hash table key
85  ip4_sv_reass_key_t key;
86  // time when last packet was received
87  f64 last_heard;
88  // internal id of this reassembly
89  u64 id;
90  // trace operation counter
91  u32 trace_op_counter;
92  // minimum fragment length for this reassembly - used to estimate MTU
93  u16 min_fragment_length;
94  // buffer indexes of buffers in this reassembly in chronological order -
95  // including overlaps and duplicate fragments
96  u32 *cached_buffers;
97  // set to true when this reassembly is completed
98  bool is_complete;
99  // ip protocol
100  u8 ip_proto;
101  u8 icmp_type_or_tcp_flags;
102  u32 tcp_ack_number;
103  u32 tcp_seq_number;
104  // l4 src port
105  u16 l4_src_port;
106  // l4 dst port
107  u16 l4_dst_port;
109  // lru indexes
110  u32 lru_prev;
111  u32 lru_next;
112 } ip4_sv_reass_t;
113 
114 typedef struct
115 {
116  ip4_sv_reass_t *pool;
117  u32 reass_n;
118  u32 id_counter;
119  clib_spinlock_t lock;
120  // lru indexes
121  u32 lru_first;
122  u32 lru_last;
123 
124 } ip4_sv_reass_per_thread_t;
125 
126 typedef struct
127 {
128  // IPv4 config
129  u32 timeout_ms;
130  f64 timeout;
131  u32 expire_walk_interval_ms;
132  // maximum number of fragments in one reassembly
133  u32 max_reass_len;
134  // maximum number of reassemblies
135  u32 max_reass_n;
136 
137  // IPv4 runtime
138  clib_bihash_16_8_t hash;
139  // per-thread data
140  ip4_sv_reass_per_thread_t *per_thread_data;
141 
142  // convenience
143  vlib_main_t *vlib_main;
144  vnet_main_t *vnet_main;
145 
146  // node index of ip4-drop node
147  u32 ip4_drop_idx;
148  u32 ip4_sv_reass_expire_node_idx;
149 
150  /** Worker handoff */
151  u32 fq_index;
152  u32 fq_feature_index;
153 
154  // reference count for enabling/disabling feature - per interface
155  u32 *feature_use_refcount_per_intf;
156 
157  // reference count for enabling/disabling feature - per interface
158  u32 *output_feature_use_refcount_per_intf;
159 
160 } ip4_sv_reass_main_t;
161 
162 extern ip4_sv_reass_main_t ip4_sv_reass_main;
163 
164 #ifndef CLIB_MARCH_VARIANT
165 ip4_sv_reass_main_t ip4_sv_reass_main;
166 #endif /* CLIB_MARCH_VARIANT */
167 
168 typedef enum
169 {
170  IP4_SV_REASSEMBLY_NEXT_INPUT,
171  IP4_SV_REASSEMBLY_NEXT_DROP,
172  IP4_SV_REASSEMBLY_NEXT_HANDOFF,
173  IP4_SV_REASSEMBLY_N_NEXT,
174 } ip4_sv_reass_next_t;
175 
176 typedef enum
177 {
178  REASS_FRAGMENT_CACHE,
179  REASS_FINISH,
180  REASS_FRAGMENT_FORWARD,
181  REASS_PASSTHROUGH,
182 } ip4_sv_reass_trace_operation_e;
183 
184 typedef struct
185 {
186  ip4_sv_reass_trace_operation_e action;
187  u32 reass_id;
188  u32 op_id;
189  u8 ip_proto;
190  u16 l4_src_port;
191  u16 l4_dst_port;
192 } ip4_sv_reass_trace_t;
193 
196 
197 static u8 *
198 format_ip4_sv_reass_trace (u8 * s, va_list * args)
199 {
200  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
201  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
202  ip4_sv_reass_trace_t *t = va_arg (*args, ip4_sv_reass_trace_t *);
203  if (REASS_PASSTHROUGH != t->action)
204  {
205  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
206  }
207  switch (t->action)
208  {
209  case REASS_FRAGMENT_CACHE:
210  s = format (s, "[cached]");
211  break;
212  case REASS_FINISH:
213  s =
214  format (s, "[finish, ip proto=%u, src_port=%u, dst_port=%u]",
215  t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
216  clib_net_to_host_u16 (t->l4_dst_port));
217  break;
218  case REASS_FRAGMENT_FORWARD:
219  s =
220  format (s, "[forward, ip proto=%u, src_port=%u, dst_port=%u]",
221  t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
222  clib_net_to_host_u16 (t->l4_dst_port));
223  break;
224  case REASS_PASSTHROUGH:
225  s = format (s, "[not-fragmented]");
226  break;
227  }
228  return s;
229 }
230 
231 static void
232 ip4_sv_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
233  ip4_sv_reass_main_t * rm, ip4_sv_reass_t * reass,
234  u32 bi, ip4_sv_reass_trace_operation_e action,
235  u32 ip_proto, u16 l4_src_port, u16 l4_dst_port)
236 {
237  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
238  ip4_sv_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
239  if (reass)
240  {
241  t->reass_id = reass->id;
242  t->op_id = reass->trace_op_counter;
243  ++reass->trace_op_counter;
244  }
245  t->action = action;
246  t->ip_proto = ip_proto;
247  t->l4_src_port = l4_src_port;
248  t->l4_dst_port = l4_dst_port;
249 #if 0
250  static u8 *s = NULL;
251  s = format (s, "%U", format_ip4_sv_reass_trace, NULL, NULL, t);
252  printf ("%.*s\n", vec_len (s), s);
253  fflush (stdout);
254  vec_reset_length (s);
255 #endif
256 }
257 
258 
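/*
 * Drop a reassembly: remove its bihash entry, free any cached buffers,
 * unlink it from the per-thread LRU list and return it to the pool.
 */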
259 always_inline void
260 ip4_sv_reass_free (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
261  ip4_sv_reass_per_thread_t * rt, ip4_sv_reass_t * reass)
262 {
263  clib_bihash_kv_16_8_t kv;
264  kv.key[0] = reass->key.as_u64[0];
265  kv.key[1] = reass->key.as_u64[1];
266  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
267  vlib_buffer_free (vm, reass->cached_buffers,
268  vec_len (reass->cached_buffers));
269  vec_free (reass->cached_buffers);
270  reass->cached_buffers = NULL;
271  if (~0 != reass->lru_prev)
272  {
273  ip4_sv_reass_t *lru_prev =
274  pool_elt_at_index (rt->pool, reass->lru_prev);
275  lru_prev->lru_next = reass->lru_next;
276  }
277  if (~0 != reass->lru_next)
278  {
279  ip4_sv_reass_t *lru_next =
280  pool_elt_at_index (rt->pool, reass->lru_next);
281  lru_next->lru_prev = reass->lru_prev;
282  }
283  if (rt->lru_first == reass - rt->pool)
284  {
285  rt->lru_first = reass->lru_next;
286  }
287  if (rt->lru_last == reass - rt->pool)
288  {
289  rt->lru_last = reass->lru_prev;
290  }
291  pool_put (rt->pool, reass);
292  --rt->reass_n;
293 }
294 
295 always_inline void
296 ip4_sv_reass_init (ip4_sv_reass_t * reass)
297 {
298  reass->cached_buffers = NULL;
299  reass->is_complete = false;
300 }
301 
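/*
 * Look up a reassembly by key. If it lives on another thread, request a
 * handoff; if it has timed out, free it and start over. When no entry
 * exists, allocate one (evicting the LRU tail if the per-thread limit is
 * reached) and insert it into the hash.
 */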
302 always_inline ip4_sv_reass_t *
303 ip4_sv_reass_find_or_create (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
304  ip4_sv_reass_per_thread_t * rt,
305  ip4_sv_reass_kv_t * kv, u8 * do_handoff)
306 {
307  ip4_sv_reass_t *reass = NULL;
308  f64 now = vlib_time_now (vm);
309 
310  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
311  {
312  if (vm->thread_index != kv->v.thread_index)
313  {
314  *do_handoff = 1;
315  return NULL;
316  }
317  reass = pool_elt_at_index (rt->pool, kv->v.reass_index);
318 
319  if (now > reass->last_heard + rm->timeout)
320  {
321  ip4_sv_reass_free (vm, rm, rt, reass);
322  reass = NULL;
323  }
324  }
325 
326  if (reass)
327  {
328  reass->last_heard = now;
329  return reass;
330  }
331 
332  if (rt->reass_n >= rm->max_reass_n && rm->max_reass_n)
333  {
334  reass = pool_elt_at_index (rt->pool, rt->lru_last);
335  ip4_sv_reass_free (vm, rm, rt, reass);
336  }
337 
338  pool_get (rt->pool, reass);
339  clib_memset (reass, 0, sizeof (*reass));
340  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
341  ++rt->id_counter;
342  ip4_sv_reass_init (reass);
343  ++rt->reass_n;
344  reass->lru_prev = reass->lru_next = ~0;
345 
346  if (~0 != rt->lru_last)
347  {
348  ip4_sv_reass_t *lru_last = pool_elt_at_index (rt->pool, rt->lru_last);
349  reass->lru_prev = rt->lru_last;
350  lru_last->lru_next = rt->lru_last = reass - rt->pool;
351  }
352 
353  if (~0 == rt->lru_first)
354  {
355  rt->lru_first = rt->lru_last = reass - rt->pool;
356  }
357 
358  reass->key.as_u64[0] = kv->kv.key[0];
359  reass->key.as_u64[1] = kv->kv.key[1];
360  kv->v.reass_index = (reass - rt->pool);
361  kv->v.thread_index = vm->thread_index;
362  reass->last_heard = now;
363 
364  if (clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 1))
365  {
366  ip4_sv_reass_free (vm, rm, rt, reass);
367  reass = NULL;
368  }
369 
370  return reass;
371 }
372 
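/*
 * Called for each fragment. The first fragment (offset 0) provides the L4
 * information and marks the reassembly complete; other fragments are only
 * cached, up to max_reass_len buffers.
 */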
373 always_inline ip4_sv_reass_rc_t
374 ip4_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
375  ip4_sv_reass_main_t * rm, ip4_sv_reass_per_thread_t * rt,
376  ip4_header_t * ip0, ip4_sv_reass_t * reass, u32 bi0)
377 {
378  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
379  ip4_sv_reass_rc_t rc = IP4_SV_REASS_RC_OK;
380  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
381  if (0 == fragment_first)
382  {
383  reass->ip_proto = ip0->protocol;
384  reass->l4_src_port = ip4_get_port (ip0, 1);
385  reass->l4_dst_port = ip4_get_port (ip0, 0);
386  if (!reass->l4_src_port || !reass->l4_dst_port)
387  return IP4_SV_REASS_RC_UNSUPP_IP_PROTO;
388  if (IP_PROTOCOL_TCP == reass->ip_proto)
389  {
390  reass->icmp_type_or_tcp_flags = ((tcp_header_t *) (ip0 + 1))->flags;
391  reass->tcp_ack_number = ((tcp_header_t *) (ip0 + 1))->ack_number;
392  reass->tcp_seq_number = ((tcp_header_t *) (ip0 + 1))->seq_number;
393  }
394  else if (IP_PROTOCOL_ICMP == reass->ip_proto)
395  {
396  reass->icmp_type_or_tcp_flags =
397  ((icmp46_header_t *) (ip0 + 1))->type;
398  }
399  reass->is_complete = true;
400  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
401  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
402  {
403  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0, REASS_FINISH,
404  reass->ip_proto, reass->l4_src_port,
405  reass->l4_dst_port);
406  }
407  }
408  vec_add1 (reass->cached_buffers, bi0);
409  if (!reass->is_complete)
410  {
411  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
412  {
413  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
414  REASS_FRAGMENT_CACHE, ~0, ~0, ~0);
415  }
416  if (vec_len (reass->cached_buffers) > rm->max_reass_len)
417  {
418  rc = IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS;
419  }
420  }
421  return rc;
422 }
423 
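/*
 * Per-packet work for all ip4-sv-reassembly nodes. Non-fragmented packets
 * have their reassembly metadata filled directly from the header and are
 * forwarded. Fragments are matched to a reassembly (possibly handed off to
 * the owning thread); once the first fragment completes it, the current
 * packet and all cached fragments are enqueued with the shared L4 metadata.
 */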
424 always_inline uword
425 ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
426  vlib_frame_t * frame, bool is_feature,
427  bool is_output_feature, bool is_custom)
428 {
429  u32 *from = vlib_frame_vector_args (frame);
430  u32 n_left_from, n_left_to_next, *to_next, next_index;
431  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
432  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
433  clib_spinlock_lock (&rt->lock);
434 
435  n_left_from = frame->n_vectors;
436  next_index = node->cached_next_index;
437 
438  while (n_left_from > 0)
439  {
440  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
441 
442  while (n_left_from > 0 && n_left_to_next > 0)
443  {
444  u32 bi0;
445  vlib_buffer_t *b0;
446  u32 next0;
447  u32 error0 = IP4_ERROR_NONE;
448 
449  bi0 = from[0];
450  b0 = vlib_get_buffer (vm, bi0);
451 
452  ip4_header_t *ip0 =
454  is_output_feature *
455  vnet_buffer (b0)->
456  ip.save_rewrite_length);
457  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
458  {
459  // this is a regular packet - no fragmentation
460  if (is_custom)
461  {
462  next0 = vnet_buffer (b0)->ip.reass.next_index;
463  }
464  else
465  {
466  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
467  }
468  vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
469  vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
470  if (IP_PROTOCOL_TCP == ip0->protocol)
471  {
472  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
473  ((tcp_header_t *) (ip0 + 1))->flags;
474  vnet_buffer (b0)->ip.reass.tcp_ack_number =
475  ((tcp_header_t *) (ip0 + 1))->ack_number;
476  vnet_buffer (b0)->ip.reass.tcp_seq_number =
477  ((tcp_header_t *) (ip0 + 1))->seq_number;
478  }
479  else if (IP_PROTOCOL_ICMP == ip0->protocol)
480  {
481  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
482  ((icmp46_header_t *) (ip0 + 1))->type;
483  }
484  vnet_buffer (b0)->ip.reass.l4_src_port = ip4_get_port (ip0, 1);
485  vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
486  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
487  {
488  ip4_sv_reass_add_trace (vm, node, rm, NULL, bi0,
489  REASS_PASSTHROUGH,
490  vnet_buffer (b0)->ip.reass.ip_proto,
491  vnet_buffer (b0)->ip.
492  reass.l4_src_port,
493  vnet_buffer (b0)->ip.
494  reass.l4_dst_port);
495  }
496  goto packet_enqueue;
497  }
498  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
499  const u32 fragment_length =
500  clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
501  const u32 fragment_last = fragment_first + fragment_length - 1;
502  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
503  {
504  next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
505  error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
506  b0->error = node->errors[error0];
507  goto packet_enqueue;
508  }
509  ip4_sv_reass_kv_t kv;
510  u8 do_handoff = 0;
511 
512  kv.k.as_u64[0] =
513  (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
514  vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
515  (u64) ip0->src_address.as_u32 << 32;
516  kv.k.as_u64[1] =
517  (u64) ip0->dst_address.
518  as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
519 
520  ip4_sv_reass_t *reass =
521  ip4_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);
522 
523  if (PREDICT_FALSE (do_handoff))
524  {
525  next0 = IP4_SV_REASSEMBLY_NEXT_HANDOFF;
526  vnet_buffer (b0)->ip.reass.owner_thread_index =
527  kv.v.thread_index;
528  goto packet_enqueue;
529  }
530 
531  if (!reass)
532  {
533  next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
534  error0 = IP4_ERROR_REASS_LIMIT_REACHED;
535  b0->error = node->errors[error0];
536  goto packet_enqueue;
537  }
538 
539  if (reass->is_complete)
540  {
541  if (is_custom)
542  {
543  next0 = vnet_buffer (b0)->ip.reass.next_index;
544  }
545  else
546  {
547  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
548  }
549  vnet_buffer (b0)->ip.reass.is_non_first_fragment =
550  ! !fragment_first;
551  vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
552  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
553  reass->icmp_type_or_tcp_flags;
554  vnet_buffer (b0)->ip.reass.tcp_ack_number =
555  reass->tcp_ack_number;
556  vnet_buffer (b0)->ip.reass.tcp_seq_number =
557  reass->tcp_seq_number;
558  vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
559  vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
560  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
561  {
562  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
563  REASS_FRAGMENT_FORWARD,
564  reass->ip_proto,
565  reass->l4_src_port,
566  reass->l4_dst_port);
567  }
568  goto packet_enqueue;
569  }
570 
571  ip4_sv_reass_rc_t rc =
572  ip4_sv_reass_update (vm, node, rm, rt, ip0, reass, bi0);
573  switch (rc)
574  {
575  case IP4_SV_REASS_RC_OK:
576  /* nothing to do here */
577  break;
578  case IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS:
579  vlib_node_increment_counter (vm, node->node_index,
580  IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
581  1);
582  ip4_sv_reass_free (vm, rm, rt, reass);
583  goto next_packet;
584  break;
585  case IP4_SV_REASS_RC_UNSUPP_IP_PROTO:
586  vlib_node_increment_counter (vm, node->node_index,
587  IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
588  1);
589  ip4_sv_reass_free (vm, rm, rt, reass);
590  goto next_packet;
591  break;
592  }
593  if (reass->is_complete)
594  {
595  u32 idx;
596  vec_foreach_index (idx, reass->cached_buffers)
597  {
598  u32 bi0 = vec_elt (reass->cached_buffers, idx);
599  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
600  u32 next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
601  if (is_feature)
602  {
603  vnet_feature_next (&next0, b0);
604  }
605  if (is_custom)
606  {
607  next0 = vnet_buffer (b0)->ip.reass.next_index;
608  }
609  if (0 == n_left_to_next)
610  {
611  vlib_put_next_frame (vm, node, next_index,
612  n_left_to_next);
613  vlib_get_next_frame (vm, node, next_index, to_next,
614  n_left_to_next);
615  }
616  to_next[0] = bi0;
617  to_next += 1;
618  n_left_to_next -= 1;
619  vnet_buffer (b0)->ip.reass.is_non_first_fragment =
621  vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
622  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
623  reass->icmp_type_or_tcp_flags;
624  vnet_buffer (b0)->ip.reass.tcp_ack_number =
625  reass->tcp_ack_number;
626  vnet_buffer (b0)->ip.reass.tcp_seq_number =
627  reass->tcp_seq_number;
628  vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
629  vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
630  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
631  {
632  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
633  REASS_FRAGMENT_FORWARD,
634  reass->ip_proto,
635  reass->l4_src_port,
636  reass->l4_dst_port);
637  }
638  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
639  to_next, n_left_to_next, bi0,
640  next0);
641  }
642  _vec_len (reass->cached_buffers) = 0; // buffers are owned by frame now
643  }
644  goto next_packet;
645 
646  packet_enqueue:
647  to_next[0] = bi0;
648  to_next += 1;
649  n_left_to_next -= 1;
650  if (is_feature && IP4_ERROR_NONE == error0)
651  {
652  b0 = vlib_get_buffer (vm, bi0);
653  vnet_feature_next (&next0, b0);
654  }
655  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
656  to_next, n_left_to_next,
657  bi0, next0);
658 
659  next_packet:
660  from += 1;
661  n_left_from -= 1;
662  }
663 
664  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
665  }
666 
667  clib_spinlock_unlock (&rt->lock);
668  return frame->n_vectors;
669 }
670 
671 static char *ip4_sv_reass_error_strings[] = {
672 #define _(sym, string) string,
673  foreach_ip4_error
674 #undef _
675 };
676 
677 VLIB_NODE_FN (ip4_sv_reass_node) (vlib_main_t * vm,
680 {
681  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
682  false /* is_output_feature */ ,
683  false /* is_custom */ );
684 }
685 
686 /* *INDENT-OFF* */
687 VLIB_REGISTER_NODE (ip4_sv_reass_node) = {
688  .name = "ip4-sv-reassembly",
689  .vector_size = sizeof (u32),
690  .format_trace = format_ip4_sv_reass_trace,
691  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
692  .error_strings = ip4_sv_reass_error_strings,
693  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
694  .next_nodes =
695  {
696  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
697  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
698  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",
699 
700  },
701 };
702 /* *INDENT-ON* */
703 
704 VLIB_NODE_FN (ip4_sv_reass_node_feature) (vlib_main_t * vm,
707 {
708  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
709  false /* is_output_feature */ ,
710  false /* is_custom */ );
711 }
712 
713 /* *INDENT-OFF* */
714 VLIB_REGISTER_NODE (ip4_sv_reass_node_feature) = {
715  .name = "ip4-sv-reassembly-feature",
716  .vector_size = sizeof (u32),
717  .format_trace = format_ip4_sv_reass_trace,
718  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
719  .error_strings = ip4_sv_reass_error_strings,
720  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
721  .next_nodes =
722  {
723  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
724  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
725  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
726  },
727 };
728 /* *INDENT-ON* */
729 
730 /* *INDENT-OFF* */
731 VNET_FEATURE_INIT (ip4_sv_reass_feature) = {
732  .arc_name = "ip4-unicast",
733  .node_name = "ip4-sv-reassembly-feature",
734  .runs_before = VNET_FEATURES ("ip4-lookup"),
735  .runs_after = 0,
736 };
737 /* *INDENT-ON* */
738 
742 {
743  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
744  true /* is_output_feature */ ,
745  false /* is_custom */ );
746 }
747 
748 
749 /* *INDENT-OFF* */
751  .name = "ip4-sv-reassembly-output-feature",
752  .vector_size = sizeof (u32),
753  .format_trace = format_ip4_sv_reass_trace,
754  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
755  .error_strings = ip4_sv_reass_error_strings,
756  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
757  .next_nodes =
758  {
759  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
760  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
761  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
762  },
763 };
764 /* *INDENT-ON* */
765 
766 /* *INDENT-OFF* */
767 VNET_FEATURE_INIT (ip4_sv_reass_output_feature) = {
768  .arc_name = "ip4-output",
769  .node_name = "ip4-sv-reassembly-output-feature",
770  .runs_before = 0,
771  .runs_after = 0,
772 };
773 /* *INDENT-ON* */
774 
775 /* *INDENT-OFF* */
777  .name = "ip4-sv-reassembly-custom-next",
778  .vector_size = sizeof (u32),
779  .format_trace = format_ip4_sv_reass_trace,
780  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
781  .error_strings = ip4_sv_reass_error_strings,
782  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
783  .next_nodes =
784  {
785  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
786  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
787  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",
788 
789  },
790 };
791 /* *INDENT-ON* */
792 
796 {
797  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
798  false /* is_output_feature */ ,
799  true /* is_custom */ );
800 }
801 
802 #ifndef CLIB_MARCH_VARIANT
803 always_inline u32
804 ip4_sv_reass_get_nbuckets ()
805 {
806  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
807  u32 nbuckets;
808  u8 i;
809 
810  nbuckets = (u32) (rm->max_reass_n / IP4_SV_REASS_HT_LOAD_FACTOR);
811 
812  for (i = 0; i < 31; i++)
813  if ((1 << i) >= nbuckets)
814  break;
815  nbuckets = 1 << i;
816 
817  return nbuckets;
818 }
819 #endif /* CLIB_MARCH_VARIANT */
820 
821 typedef enum
822 {
823  IP4_EVENT_CONFIG_CHANGED = 1,
824 } ip4_sv_reass_event_t;
825 
826 typedef struct
827 {
828  int failure;
829  clib_bihash_16_8_t *new_hash;
830 } ip4_rehash_cb_ctx;
831 
832 #ifndef CLIB_MARCH_VARIANT
833 static int
834 ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
835 {
836  ip4_rehash_cb_ctx *ctx = _ctx;
837  if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
838  {
839  ctx->failure = 1;
840  }
841  return (BIHASH_WALK_CONTINUE);
842 }
843 
844 static void
845 ip4_sv_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
846  u32 max_reassembly_length,
847  u32 expire_walk_interval_ms)
848 {
849  ip4_sv_reass_main.timeout_ms = timeout_ms;
850  ip4_sv_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
851  ip4_sv_reass_main.max_reass_n = max_reassemblies;
852  ip4_sv_reass_main.max_reass_len = max_reassembly_length;
853  ip4_sv_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
854 }
855 
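/*
 * Apply a new configuration. If the higher reassembly limit requires a
 * larger hash table, rebuild the bihash and copy the existing entries via
 * ip4_rehash_cb.
 */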
856 vnet_api_error_t
857 ip4_sv_reass_set (u32 timeout_ms, u32 max_reassemblies,
858  u32 max_reassembly_length, u32 expire_walk_interval_ms)
859 {
860  u32 old_nbuckets = ip4_sv_reass_get_nbuckets ();
861  ip4_sv_reass_set_params (timeout_ms, max_reassemblies,
862  max_reassembly_length, expire_walk_interval_ms);
863  vlib_process_signal_event (ip4_sv_reass_main.vlib_main,
864  ip4_sv_reass_main.ip4_sv_reass_expire_node_idx,
865  IP4_EVENT_CONFIG_CHANGED, 0);
866  u32 new_nbuckets = ip4_sv_reass_get_nbuckets ();
867  if (ip4_sv_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
868  {
869  clib_bihash_16_8_t new_hash;
870  clib_memset (&new_hash, 0, sizeof (new_hash));
871  ip4_rehash_cb_ctx ctx;
872  ctx.failure = 0;
873  ctx.new_hash = &new_hash;
874  clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
875  new_nbuckets * 1024);
876  clib_bihash_foreach_key_value_pair_16_8 (&ip4_sv_reass_main.hash,
877  ip4_rehash_cb, &ctx);
878  if (ctx.failure)
879  {
880  clib_bihash_free_16_8 (&new_hash);
881  return -1;
882  }
883  else
884  {
885  clib_bihash_free_16_8 (&ip4_sv_reass_main.hash);
886  clib_memcpy_fast (&ip4_sv_reass_main.hash, &new_hash,
887  sizeof (ip4_sv_reass_main.hash));
888  clib_bihash_copied (&ip4_sv_reass_main.hash, &new_hash);
889  }
890  }
891  return 0;
892 }
893 
894 vnet_api_error_t
895 ip4_sv_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
896  u32 * max_reassembly_length, u32 * expire_walk_interval_ms)
897 {
898  *timeout_ms = ip4_sv_reass_main.timeout_ms;
899  *max_reassemblies = ip4_sv_reass_main.max_reass_n;
900  *max_reassembly_length = ip4_sv_reass_main.max_reass_len;
901  *expire_walk_interval_ms = ip4_sv_reass_main.expire_walk_interval_ms;
902  return 0;
903 }
904 
905 static clib_error_t *
906 ip4_sv_reass_init_function (vlib_main_t * vm)
907 {
908  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
909  clib_error_t *error = 0;
910  u32 nbuckets;
911  vlib_node_t *node;
912 
913  rm->vlib_main = vm;
914  rm->vnet_main = vnet_get_main ();
915 
918  vec_foreach (rt, rm->per_thread_data)
919  {
920  clib_spinlock_init (&rt->lock);
921  pool_alloc (rt->pool, rm->max_reass_n);
922  rt->lru_first = rt->lru_last = ~0;
923  }
924 
925  node = vlib_get_node_by_name (vm, (u8 *) "ip4-sv-reassembly-expire-walk");
926  ASSERT (node);
928 
933 
934  nbuckets = ip4_sv_reass_get_nbuckets ();
935  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
936 
937  node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
938  ASSERT (node);
939  rm->ip4_drop_idx = node->index;
940 
941  rm->fq_index = vlib_frame_queue_main_init (ip4_sv_reass_node.index, 0);
942  rm->fq_feature_index =
943  vlib_frame_queue_main_init (ip4_sv_reass_node_feature.index, 0);
944 
947 
948  return error;
949 }
950 
952 #endif /* CLIB_MARCH_VARIANT */
953 
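/*
 * Process node: wakes up every expire_walk_interval_ms (or on a config
 * event) and frees reassemblies that have not been heard from within the
 * configured timeout.
 */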
954 static uword
955 ip4_sv_reass_walk_expired (vlib_main_t * vm,
956  vlib_node_runtime_t * node, vlib_frame_t * f)
957 {
958  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
959  uword event_type, *event_data = 0;
960 
961  while (true)
962  {
964  (f64)
966  (f64) MSEC_PER_SEC);
967  event_type = vlib_process_get_events (vm, &event_data);
968 
969  switch (event_type)
970  {
971  case ~0: /* no events => timeout */
972  /* nothing to do here */
973  break;
975  break;
976  default:
977  clib_warning ("BUG: event type 0x%wx", event_type);
978  break;
979  }
980  f64 now = vlib_time_now (vm);
981 
982  ip4_sv_reass_t *reass;
983  int *pool_indexes_to_free = NULL;
984 
985  uword thread_index = 0;
986  int index;
987  const uword nthreads = vlib_num_workers () + 1;
988  for (thread_index = 0; thread_index < nthreads; ++thread_index)
989  {
990  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
991  clib_spinlock_lock (&rt->lock);
992 
993  vec_reset_length (pool_indexes_to_free);
994  /* *INDENT-OFF* */
995  pool_foreach_index (index, rt->pool, ({
996  reass = pool_elt_at_index (rt->pool, index);
997  if (now > reass->last_heard + rm->timeout)
998  {
999  vec_add1 (pool_indexes_to_free, index);
1000  }
1001  }));
1002  /* *INDENT-ON* */
1003  int *i;
1004  /* *INDENT-OFF* */
1005  vec_foreach (i, pool_indexes_to_free)
1006  {
1007  ip4_sv_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1008  ip4_sv_reass_free (vm, rm, rt, reass);
1009  }
1010  /* *INDENT-ON* */
1011 
1012  clib_spinlock_unlock (&rt->lock);
1013  }
1014 
1015  vec_free (pool_indexes_to_free);
1016  if (event_data)
1017  {
1018  _vec_len (event_data) = 0;
1019  }
1020  }
1021 
1022  return 0;
1023 }
1024 
1025 /* *INDENT-OFF* */
1027  .function = ip4_sv_reass_walk_expired,
1028  .type = VLIB_NODE_TYPE_PROCESS,
1029  .name = "ip4-sv-reassembly-expire-walk",
1030  .format_trace = format_ip4_sv_reass_trace,
1031  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
1032  .error_strings = ip4_sv_reass_error_strings,
1033 
1034 };
1035 /* *INDENT-ON* */
1036 
1037 static u8 *
1038 format_ip4_sv_reass_key (u8 * s, va_list * args)
1039 {
1040  ip4_sv_reass_key_t *key = va_arg (*args, ip4_sv_reass_key_t *);
1041  s =
1042  format (s,
1043  "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1045  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1046  return s;
1047 }
1048 
1049 static u8 *
1050 format_ip4_sv_reass (u8 * s, va_list * args)
1051 {
1052  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1053  ip4_sv_reass_t *reass = va_arg (*args, ip4_sv_reass_t *);
1054 
1055  s = format (s, "ID: %lu, key: %U trace_op_counter: %u\n",
1056  reass->id, format_ip4_sv_reass_key, &reass->key,
1057  reass->trace_op_counter);
1058 
1059  vlib_buffer_t *b;
1060  u32 *bip;
1061  u32 counter = 0;
1062  vec_foreach (bip, reass->cached_buffers)
1063  {
1064  u32 bi = *bip;
1065  do
1066  {
1067  b = vlib_get_buffer (vm, bi);
1068  s = format (s, " #%03u: bi: %u, ", counter, bi);
1069  ++counter;
1070  bi = b->next_buffer;
1071  }
1072  while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
1073  }
1074  return s;
1075 }
1076 
1077 static clib_error_t *
1078 show_ip4_reass (vlib_main_t * vm,
1079  unformat_input_t * input,
1080  CLIB_UNUSED (vlib_cli_command_t * lmd))
1081 {
1082  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1083 
1084  vlib_cli_output (vm, "---------------------");
1085  vlib_cli_output (vm, "IP4 reassembly status");
1086  vlib_cli_output (vm, "---------------------");
1087  bool details = false;
1088  if (unformat (input, "details"))
1089  {
1090  details = true;
1091  }
1092 
1093  u32 sum_reass_n = 0;
1094  ip4_sv_reass_t *reass;
1095  uword thread_index;
1096  const uword nthreads = vlib_num_workers () + 1;
1097  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1098  {
1099  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1100  clib_spinlock_lock (&rt->lock);
1101  if (details)
1102  {
1103  /* *INDENT-OFF* */
1104  pool_foreach (reass, rt->pool, {
1105  vlib_cli_output (vm, "%U", format_ip4_sv_reass, vm, reass);
1106  });
1107  /* *INDENT-ON* */
1108  }
1109  sum_reass_n += rt->reass_n;
1110  clib_spinlock_unlock (&rt->lock);
1111  }
1112  vlib_cli_output (vm, "---------------------");
1113  vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
1114  (long unsigned) sum_reass_n);
1115  vlib_cli_output (vm,
1116  "Maximum configured concurrent shallow virtual IP4 reassemblies per worker-thread: %lu\n",
1117  (long unsigned) rm->max_reass_n);
1118  vlib_cli_output (vm,
1119  "Maximum configured shallow virtual IP4 reassembly timeout: %lums\n",
1120  (long unsigned) rm->timeout_ms);
1121  vlib_cli_output (vm,
1122  "Maximum configured shallow virtual IP4 reassembly expire walk interval: %lums\n",
1123  (long unsigned) rm->expire_walk_interval_ms);
1124  return 0;
1125 }
1126 
1127 /* *INDENT-OFF* */
1129  .path = "show ip4-sv-reassembly",
1130  .short_help = "show ip4-sv-reassembly [details]",
1131  .function = show_ip4_reass,
1132 };
1133 /* *INDENT-ON* */
1134 
1135 #ifndef CLIB_MARCH_VARIANT
1138 {
1139  return ip4_sv_reass_enable_disable_with_refcnt (sw_if_index,
1140  enable_disable);
1141 }
1142 #endif /* CLIB_MARCH_VARIANT */
1143 
1144 
1145 #define foreach_ip4_sv_reass_handoff_error \
1146 _(CONGESTION_DROP, "congestion drop")
1147 
1148 
1149 typedef enum
1150 {
1151 #define _(sym,str) IP4_SV_REASSEMBLY_HANDOFF_ERROR_##sym,
1152  foreach_ip4_sv_reass_handoff_error
1153 #undef _
1154  IP4_SV_REASSEMBLY_HANDOFF_N_ERROR,
1155 } ip4_sv_reass_handoff_error_t;
1156 
1157 static char *ip4_sv_reass_handoff_error_strings[] = {
1158 #define _(sym,string) string,
1159  foreach_ip4_sv_reass_handoff_error
1160 #undef _
1161 };
1162 
1163 typedef struct
1164 {
1165  u32 next_worker_index;
1166 } ip4_sv_reass_handoff_trace_t;
1167 
1168 static u8 *
1169 format_ip4_sv_reass_handoff_trace (u8 * s, va_list * args)
1170 {
1171  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1172  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1173  ip4_sv_reass_handoff_trace_t *t =
1174  va_arg (*args, ip4_sv_reass_handoff_trace_t *);
1175 
1176  s =
1177  format (s, "ip4-sv-reassembly-handoff: next-worker %d",
1178  t->next_worker_index);
1179 
1180  return s;
1181 }
1182 
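/*
 * Handoff path: each buffer carries the owning thread in its reassembly
 * metadata; enqueue it to that worker's frame queue and count congestion
 * drops when the queue is full.
 */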
1183 always_inline uword
1184 ip4_sv_reass_handoff_node_inline (vlib_main_t * vm,
1185  vlib_node_runtime_t * node,
1186  vlib_frame_t * frame, bool is_feature)
1187 {
1189 
1190  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1191  u32 n_enq, n_left_from, *from;
1192  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1193  u32 fq_index;
1194 
1195  from = vlib_frame_vector_args (frame);
1196  n_left_from = frame->n_vectors;
1197  vlib_get_buffers (vm, from, bufs, n_left_from);
1198 
1199  b = bufs;
1200  ti = thread_indices;
1201 
1202  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1203 
1204  while (n_left_from > 0)
1205  {
1206  ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
1207 
1208  if (PREDICT_FALSE
1209  ((node->flags & VLIB_NODE_FLAG_TRACE)
1210  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1211  {
1213  vlib_add_trace (vm, node, b[0], sizeof (*t));
1214  t->next_worker_index = ti[0];
1215  }
1216 
1217  n_left_from -= 1;
1218  ti += 1;
1219  b += 1;
1220  }
1221  n_enq =
1222  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1223  frame->n_vectors, 1);
1224 
1225  if (n_enq < frame->n_vectors)
1227  IP4_SV_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1228  frame->n_vectors - n_enq);
1229  return frame->n_vectors;
1230 }
1231 
1234  vlib_frame_t * frame)
1235 {
1236  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
1237  false /* is_feature */ );
1238 }
1239 
1240 
1241 /* *INDENT-OFF* */
1243  .name = "ip4-sv-reassembly-handoff",
1244  .vector_size = sizeof (u32),
1245  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
1246  .error_strings = ip4_sv_reass_handoff_error_strings,
1247  .format_trace = format_ip4_sv_reass_handoff_trace,
1248 
1249  .n_next_nodes = 1,
1250 
1251  .next_nodes = {
1252  [0] = "error-drop",
1253  },
1254 };
1255 /* *INDENT-ON* */
1256 
1257 
1258 /* *INDENT-OFF* */
1261  node,
1262  vlib_frame_t * frame)
1263 {
1264  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
1265  true /* is_feature */ );
1266 }
1267 /* *INDENT-ON* */
1268 
1269 
1270 /* *INDENT-OFF* */
1272  .name = "ip4-sv-reass-feature-hoff",
1273  .vector_size = sizeof (u32),
1274  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
1275  .error_strings = ip4_sv_reass_handoff_error_strings,
1276  .format_trace = format_ip4_sv_reass_handoff_trace,
1277 
1278  .n_next_nodes = 1,
1279 
1280  .next_nodes = {
1281  [0] = "error-drop",
1282  },
1283 };
1284 /* *INDENT-ON* */
1285 
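/*
 * Per-interface reference counting for the feature arcs: the vnet feature
 * is enabled on the first user and disabled when the last user goes away.
 */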
1286 #ifndef CLIB_MARCH_VARIANT
1287 int
1288 ip4_sv_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1289 {
1290  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1291  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1292  if (is_enable)
1293  {
1294  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1295  {
1297  return vnet_feature_enable_disable ("ip4-unicast",
1298  "ip4-sv-reassembly-feature",
1299  sw_if_index, 1, 0, 0);
1300  }
1302  }
1303  else
1304  {
1305  if (rm->feature_use_refcount_per_intf[sw_if_index])
1307  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1308  return vnet_feature_enable_disable ("ip4-unicast",
1309  "ip4-sv-reassembly-feature",
1310  sw_if_index, 0, 0, 0);
1311  }
1312  return 0;
1313 }
1314 
1315 uword
1317 {
1319  node_index);
1320 }
1321 
1322 int
1324  int is_enable)
1325 {
1328  if (is_enable)
1329  {
1330  if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
1331  {
1333  return vnet_feature_enable_disable ("ip4-output",
1334  "ip4-sv-reassembly-output-feature",
1335  sw_if_index, 1, 0, 0);
1336  }
1338  }
1339  else
1340  {
1341  if (rm->output_feature_use_refcount_per_intf[sw_if_index])
1343  if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
1344  return vnet_feature_enable_disable ("ip4-output",
1345  "ip4-sv-reassembly-output-feature",
1346  sw_if_index, 0, 0, 0);
1347  }
1348  return 0;
1349 }
1350 #endif
1351 
1352 /*
1353  * fd.io coding-style-patch-verification: ON
1354  *
1355  * Local Variables:
1356  * eval: (c-set-style "gnu")
1357  * End:
1358  */