FD.io VPP  v19.08-27-gf4dcae4
Vector Packet Processing
ip6_reassembly.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv6 Reassembly.
19  *
20  * This file contains the source code for IPv6 reassembly.
21  */
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vppinfra/bihash_48_8.h>
27 #include <vnet/ip/ip6_reassembly.h>
28 
29 #define MSEC_PER_SEC 1000
30 #define IP6_REASS_TIMEOUT_DEFAULT_MS 100
31 #define IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
32 #define IP6_REASS_MAX_REASSEMBLIES_DEFAULT 1024
33 #define IP6_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
34 #define IP6_REASS_HT_LOAD_FACTOR (0.75)
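/* Annotation (not part of the original source): these defaults are applied at
 * init time through ip6_reass_set_params() and can be changed at run time via
 * the exported setter; e.g. a hypothetical call
 *
 *   ip6_reass_set (IP6_REASS_TIMEOUT_DEFAULT_MS,
 *                  IP6_REASS_MAX_REASSEMBLIES_DEFAULT,
 *                  IP6_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
 *                  IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
 *
 * would restore them. */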
35 
36 typedef enum
37 {
38  IP6_REASS_RC_OK,
39  IP6_REASS_RC_INTERNAL_ERROR,
40  IP6_REASS_RC_TOO_MANY_FRAGMENTS,
41  IP6_REASS_RC_NO_BUF,
42 } ip6_reass_rc_t;
43 
44 typedef struct
45 {
46  union
47  {
48  struct
49  {
50  ip6_address_t src;
51  ip6_address_t dst;
52  u32 xx_id;
53  u32 frag_id;
54  u8 unused[7];
55  u8 proto;
56  };
57  u64 as_u64[6];
58  };
59 } ip6_reass_key_t;
60 
61 typedef union
62 {
63  struct
64  {
65  u32 reass_index;
66  u32 thread_index;
67  };
68  u64 as_u64;
69 } ip6_reass_val_t;
70 
71 typedef union
72 {
73  struct
74  {
75  ip6_reass_key_t k;
76  ip6_reass_val_t v;
77  };
78  clib_bihash_kv_48_8_t kv;
79 } ip6_reass_kv_t;
80 
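/* Annotation (not part of the original source): ip6_reass_key_t packs to 48
 * bytes (its u64 as_u64[6] view), which is why it aliases cleanly onto
 * clib_bihash_kv_48_8_t and can be copied word-for-word into the bihash key
 * when a reassembly context is looked up or inserted below. */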
81 
82 static u32
83 ip6_reass_buffer_get_data_offset (vlib_buffer_t * b)
84 {
85  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
86  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
87 }
88 
89 static u16
90 ip6_reass_buffer_get_data_len (vlib_buffer_t * b)
91 {
92  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
93  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
94  (vnb->ip.reass.fragment_first + ip6_reass_buffer_get_data_offset (b)) + 1;
95 }
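/* Annotation (not part of the original source), worked example: for a
 * fragment whose bytes span [0, 1399] but whose accepted range was trimmed to
 * [16, 1399], ip6_reass_buffer_get_data_offset() yields 16 - 0 = 16 and
 * ip6_reass_buffer_get_data_len() yields min (1399, 1399) - (0 + 16) + 1 =
 * 1384 bytes of usable payload. */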
96 
97 typedef struct
98 {
99  // hash table key
100  ip6_reass_key_t key;
101  // time when last packet was received
102  f64 last_heard;
103  // internal id of this reassembly
104  u64 id;
105  // buffer index of first buffer in this reassembly context
106  u32 first_bi;
107  // last octet of packet, ~0 until fragment without more_fragments arrives
108  u32 last_packet_octet;
109  // length of data collected so far
110  u32 data_len;
111  // trace operation counter
112  u32 trace_op_counter;
113  // next index - used by custom apps (~0 if not set)
114  u32 next_index;
115  // error next index - used by custom apps (~0 if not set)
116  u32 error_next_index;
117  // minimum fragment length for this reassembly - used to estimate MTU
118  u16 min_fragment_length;
119  // number of fragments for this reassembly
120  u32 fragments_n;
121 } ip6_reass_t;
122 
123 typedef struct
124 {
125  ip6_reass_t *pool;
126  u32 reass_n;
127  u32 id_counter;
128  clib_spinlock_t lock;
129 } ip6_reass_per_thread_t;
130 
131 typedef struct
132 {
133  // IPv6 config
134  u32 timeout_ms;
135  f64 timeout;
136  u32 expire_walk_interval_ms;
137  // maximum number of fragments in one reassembly
138  u32 max_reass_len;
139  // maximum number of reassemblies
140  u32 max_reass_n;
141 
142  // IPv6 runtime
143  clib_bihash_48_8_t hash;
144 
145  // per-thread data
146  ip6_reass_per_thread_t *per_thread_data;
147 
148  // convenience
149  vlib_main_t *vlib_main;
150  vnet_main_t *vnet_main;
151 
152  // node index of ip6-drop node
153  u32 ip6_drop_idx;
154  u32 ip6_icmp_error_idx;
155  u32 ip6_reass_expire_node_idx;
156 
157  /** Worker handoff */
158  u32 fq_index;
159  u32 fq_feature_index;
160 
161 } ip6_reass_main_t;
162 
163 extern ip6_reass_main_t ip6_reass_main;
164 
165 #ifndef CLIB_MARCH_VARIANT
166 ip6_reass_main_t ip6_reass_main;
167 #endif /* CLIB_MARCH_VARIANT */
168 
169 typedef enum
170 {
171  IP6_REASSEMBLY_NEXT_INPUT,
172  IP6_REASSEMBLY_NEXT_DROP,
173  IP6_REASSEMBLY_NEXT_ICMP_ERROR,
174  IP6_REASSEMBLY_NEXT_HANDOFF,
175  IP6_REASSEMBLY_N_NEXT,
176 } ip6_reass_next_t;
177 
178 typedef enum
179 {
180  RANGE_NEW,
181  RANGE_OVERLAP,
182  ICMP_ERROR_RT_EXCEEDED,
183  ICMP_ERROR_FL_TOO_BIG,
184  ICMP_ERROR_FL_NOT_MULT_8,
185  FINALIZE,
186 } ip6_reass_trace_operation_e;
187 
188 typedef struct
189 {
190  u16 range_first;
191  u16 range_last;
192  u32 range_bi;
193  i32 data_offset;
194  u32 data_len;
195  u32 first_bi;
196 } ip6_reass_range_trace_t;
197 
198 typedef struct
199 {
200  ip6_reass_trace_operation_e action;
201  u32 reass_id;
202  ip6_reass_range_trace_t trace_range;
203  u32 size_diff;
204  u32 op_id;
205  u32 fragment_first;
206  u32 fragment_last;
207  u32 total_data_len;
208 } ip6_reass_trace_t;
209 
210 static void
211 ip6_reass_trace_details (vlib_main_t * vm, u32 bi,
212  ip6_reass_range_trace_t * trace)
213 {
214  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
215  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
216  trace->range_first = vnb->ip.reass.range_first;
217  trace->range_last = vnb->ip.reass.range_last;
218  trace->data_offset = ip6_reass_buffer_get_data_offset (b);
219  trace->data_len = ip6_reass_buffer_get_data_len (b);
220  trace->range_bi = bi;
221 }
222 
223 static u8 *
224 format_ip6_reass_range_trace (u8 * s, va_list * args)
225 {
226  ip6_reass_range_trace_t *trace = va_arg (*args, ip6_reass_range_trace_t *);
227  s = format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
228  trace->range_last, trace->data_offset, trace->data_len,
229  trace->range_bi);
230  return s;
231 }
232 
233 static u8 *
234 format_ip6_reass_trace (u8 * s, va_list * args)
235 {
236  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
237  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
238  ip6_reass_trace_t *t = va_arg (*args, ip6_reass_trace_t *);
239  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
240  u32 indent = format_get_indent (s);
241  s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
242  t->trace_range.first_bi, t->total_data_len, t->fragment_first,
243  t->fragment_last);
244  switch (t->action)
245  {
246  case RANGE_NEW:
247  s = format (s, "\n%Unew %U", format_white_space, indent,
248  format_ip6_reass_range_trace, &t->trace_range);
249  break;
250  case RANGE_OVERLAP:
251  s = format (s, "\n%Uoverlap %U", format_white_space, indent,
252  format_ip6_reass_range_trace, &t->trace_range);
253  break;
254  case ICMP_ERROR_FL_TOO_BIG:
255  s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
256  format_white_space, indent, format_ip6_reass_range_trace,
257  &t->trace_range);
258  break;
259  case ICMP_ERROR_FL_NOT_MULT_8:
260  s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
261  format_white_space, indent, format_ip6_reass_range_trace,
262  &t->trace_range);
263  break;
264  case ICMP_ERROR_RT_EXCEEDED:
265  s = format (s, "\n%Uicmp-error - reassembly time exceeded",
266  format_white_space, indent);
267  break;
268  case FINALIZE:
269  s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
270  break;
271  }
272  return s;
273 }
274 
275 static void
276 ip6_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
277  ip6_reass_main_t * rm, ip6_reass_t * reass,
278  u32 bi, ip6_reass_trace_operation_e action,
279  u32 size_diff)
280 {
281  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
282  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
283  ip6_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
284  t->reass_id = reass->id;
285  t->action = action;
286  ip6_reass_trace_details (vm, bi, &t->trace_range);
287  t->size_diff = size_diff;
288  t->op_id = reass->trace_op_counter;
289  ++reass->trace_op_counter;
290  t->fragment_first = vnb->ip.reass.fragment_first;
291  t->fragment_last = vnb->ip.reass.fragment_last;
292  t->trace_range.first_bi = reass->first_bi;
293  t->total_data_len = reass->data_len;
294 #if 0
295  static u8 *s = NULL;
296  s = format (s, "%U", format_ip6_reass_trace, NULL, NULL, t);
297  printf ("%.*s\n", vec_len (s), s);
298  fflush (stdout);
299  vec_reset_length (s);
300 #endif
301 }
302 
303 always_inline void
304 ip6_reass_free (ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
305  ip6_reass_t * reass)
306 {
307  clib_bihash_kv_48_8_t kv;
308  kv.key[0] = reass->key.as_u64[0];
309  kv.key[1] = reass->key.as_u64[1];
310  kv.key[2] = reass->key.as_u64[2];
311  kv.key[3] = reass->key.as_u64[3];
312  kv.key[4] = reass->key.as_u64[4];
313  kv.key[5] = reass->key.as_u64[5];
314  clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
315  pool_put (rt->pool, reass);
316  --rt->reass_n;
317 }
318 
319 always_inline void
320 ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
321  ip6_reass_main_t * rm, ip6_reass_t * reass)
322 {
323  u32 range_bi = reass->first_bi;
324  vlib_buffer_t *range_b;
325  vnet_buffer_opaque_t *range_vnb;
326  u32 *to_free = NULL;
327  while (~0 != range_bi)
328  {
329  range_b = vlib_get_buffer (vm, range_bi);
330  range_vnb = vnet_buffer (range_b);
331  u32 bi = range_bi;
332  while (~0 != bi)
333  {
334  vec_add1 (to_free, bi);
335  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
336  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
337  {
338  bi = b->next_buffer;
339  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
340  }
341  else
342  {
343  bi = ~0;
344  }
345  }
346  range_bi = range_vnb->ip.reass.next_range_bi;
347  }
348  /* send to next_error_index */
349  if (~0 != reass->error_next_index)
350  {
351  u32 n_left_to_next, *to_next, next_index;
352 
353  next_index = reass->error_next_index;
354  u32 bi = ~0;
355 
356  while (vec_len (to_free) > 0)
357  {
358  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
359 
360  while (vec_len (to_free) > 0 && n_left_to_next > 0)
361  {
362  bi = vec_pop (to_free);
363 
364  if (~0 != bi)
365  {
366  to_next[0] = bi;
367  to_next += 1;
368  n_left_to_next -= 1;
369  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
370  to_next, n_left_to_next,
371  bi, next_index);
372  }
373  }
374  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
375  }
376  }
377  else
378  {
379  vlib_buffer_free (vm, to_free, vec_len (to_free));
380  }
381  vec_free (to_free);
382 }
383 
384 always_inline void
385 ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
386  ip6_reass_main_t * rm, ip6_reass_t * reass,
387  u32 * icmp_bi)
388 {
389  if (~0 == reass->first_bi)
390  {
391  return;
392  }
393  if (~0 == reass->next_index) // custom apps don't want icmp
394  {
395  vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
396  if (0 == vnet_buffer (b)->ip.reass.fragment_first)
397  {
398  *icmp_bi = reass->first_bi;
399  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
400  {
401  ip6_reass_add_trace (vm, node, rm, reass, reass->first_bi,
402  ICMP_ERROR_RT_EXCEEDED, 0);
403  }
404  // fragment with offset zero received - send icmp message back
405  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
406  {
407  // separate first buffer from chain and steer it towards icmp node
408  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
409  reass->first_bi = b->next_buffer;
410  }
411  else
412  {
413  reass->first_bi = vnet_buffer (b)->ip.reass.next_range_bi;
414  }
415  icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
416  ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
417  0);
418  }
419  }
420  ip6_reass_drop_all (vm, node, rm, reass);
421 }
422 
423 always_inline ip6_reass_t *
424 ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
425  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
426  ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff)
427 {
428  ip6_reass_t *reass = NULL;
429  f64 now = vlib_time_now (rm->vlib_main);
430 
431  if (!clib_bihash_search_48_8
432  (&rm->hash, (clib_bihash_kv_48_8_t *) kv, (clib_bihash_kv_48_8_t *) kv))
433  {
434  if (vm->thread_index != kv->v.thread_index)
435  {
436  *do_handoff = 1;
437  return NULL;
438  }
439  reass = pool_elt_at_index (rt->pool, kv->v.reass_index);
440 
441  if (now > reass->last_heard + rm->timeout)
442  {
443  ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi);
444  ip6_reass_free (rm, rt, reass);
445  reass = NULL;
446  }
447  }
448 
449  if (reass)
450  {
451  reass->last_heard = now;
452  return reass;
453  }
454 
455  if (rt->reass_n >= rm->max_reass_n)
456  {
457  reass = NULL;
458  return reass;
459  }
460  else
461  {
462  pool_get (rt->pool, reass);
463  clib_memset (reass, 0, sizeof (*reass));
464  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
465  ++rt->id_counter;
466  reass->first_bi = ~0;
467  reass->last_packet_octet = ~0;
468  reass->data_len = 0;
469  reass->next_index = ~0;
470  reass->error_next_index = ~0;
471  ++rt->reass_n;
472  }
473 
474  reass->key.as_u64[0] = ((clib_bihash_kv_48_8_t *) kv)->key[0];
475  reass->key.as_u64[1] = ((clib_bihash_kv_48_8_t *) kv)->key[1];
476  reass->key.as_u64[2] = ((clib_bihash_kv_48_8_t *) kv)->key[2];
477  reass->key.as_u64[3] = ((clib_bihash_kv_48_8_t *) kv)->key[3];
478  reass->key.as_u64[4] = ((clib_bihash_kv_48_8_t *) kv)->key[4];
479  reass->key.as_u64[5] = ((clib_bihash_kv_48_8_t *) kv)->key[5];
480  kv->v.reass_index = (reass - rt->pool);
481  kv->v.thread_index = vm->thread_index;
482  reass->last_heard = now;
483 
484  if (clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 1))
485  {
486  ip6_reass_free (rm, rt, reass);
487  reass = NULL;
488  }
489 
490  return reass;
491 }
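/* Annotation (not part of the original source), worked example: the id
 * assigned above encodes the owning thread, e.g. the reassembly created when
 * id_counter is 7 on worker thread 2 gets id 2 * 1000000000 + 7 =
 * 2000000007. */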
492 
493 always_inline ip6_reass_rc_t
494 ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
495  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
496  ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
497  bool is_custom_app)
498 {
499  *bi0 = reass->first_bi;
500  *error0 = IP6_ERROR_NONE;
501  ip6_frag_hdr_t *frag_hdr;
502  vlib_buffer_t *last_b = NULL;
503  u32 sub_chain_bi = reass->first_bi;
504  u32 total_length = 0;
505  u32 buf_cnt = 0;
506  u32 dropped_cnt = 0;
507  u32 *vec_drop_compress = NULL;
508  ip6_reass_rc_t rv = IP6_REASS_RC_OK;
509  do
510  {
511  u32 tmp_bi = sub_chain_bi;
512  vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
513  vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
514  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
515  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
516  {
517  rv = IP6_REASS_RC_INTERNAL_ERROR;
518  goto free_buffers_and_return;
519  }
520 
521  u32 data_len = ip6_reass_buffer_get_data_len (tmp);
522  u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
523  sizeof (*frag_hdr) + ip6_reass_buffer_get_data_offset (tmp);
524  u32 trim_end =
525  vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
526  if (tmp_bi == reass->first_bi)
527  {
528  /* first buffer - keep ip6 header */
529  if (0 != ip6_reass_buffer_get_data_offset (tmp))
530  {
531  rv = IP6_REASS_RC_INTERNAL_ERROR;
532  goto free_buffers_and_return;
533  }
534  trim_front = 0;
535  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
536  (vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
537  sizeof (*frag_hdr));
538  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
539  {
540  rv = IP6_REASS_RC_INTERNAL_ERROR;
541  goto free_buffers_and_return;
542  }
543  }
544  u32 keep_data =
545  vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
546  while (1)
547  {
548  ++buf_cnt;
549  if (trim_front)
550  {
551  if (trim_front > tmp->current_length)
552  {
553  /* drop whole buffer */
554  vec_add1 (vec_drop_compress, tmp_bi);
555  trim_front -= tmp->current_length;
556  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
557  {
558  rv = IP6_REASS_RC_INTERNAL_ERROR;
559  goto free_buffers_and_return;
560  }
561  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
562  tmp_bi = tmp->next_buffer;
563  tmp = vlib_get_buffer (vm, tmp_bi);
564  continue;
565  }
566  else
567  {
568  vlib_buffer_advance (tmp, trim_front);
569  trim_front = 0;
570  }
571  }
572  if (keep_data)
573  {
574  if (last_b)
575  {
576  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
577  last_b->next_buffer = tmp_bi;
578  }
579  last_b = tmp;
580  if (keep_data <= tmp->current_length)
581  {
582  tmp->current_length = keep_data;
583  keep_data = 0;
584  }
585  else
586  {
587  keep_data -= tmp->current_length;
588  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
589  {
590  rv = IP6_REASS_RC_INTERNAL_ERROR;
591  goto free_buffers_and_return;
592  }
593  }
594  total_length += tmp->current_length;
595  }
596  else
597  {
598  vec_add1 (vec_drop_compress, tmp_bi);
599  if (reass->first_bi == tmp_bi)
600  {
601  rv = IP6_REASS_RC_INTERNAL_ERROR;
602  goto free_buffers_and_return;
603  }
604  ++dropped_cnt;
605  }
606  if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
607  {
608  tmp_bi = tmp->next_buffer;
609  tmp = vlib_get_buffer (vm, tmp->next_buffer);
610  }
611  else
612  {
613  break;
614  }
615  }
616  sub_chain_bi =
617  vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
618  reass.next_range_bi;
619  }
620  while (~0 != sub_chain_bi);
621 
622  if (!last_b)
623  {
624  rv = IP6_REASS_RC_INTERNAL_ERROR;
625  goto free_buffers_and_return;
626  }
627  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
628  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
629  if (total_length < first_b->current_length)
630  {
631  rv = IP6_REASS_RC_INTERNAL_ERROR;
632  goto free_buffers_and_return;
633  }
634  total_length -= first_b->current_length;
635  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
636  first_b->total_length_not_including_first_buffer = total_length;
637  // drop fragment header
638  vnet_buffer_opaque_t *first_b_vnb = vnet_buffer (first_b);
639  ip6_header_t *ip = vlib_buffer_get_current (first_b);
640  u16 ip6_frag_hdr_offset = first_b_vnb->ip.reass.ip6_frag_hdr_offset;
641  ip6_ext_header_t *prev_hdr;
642  ip6_ext_header_find_t (ip, prev_hdr, frag_hdr,
643  IP_PROTOCOL_IPV6_FRAGMENTATION);
644  if (prev_hdr)
645  {
646  prev_hdr->next_hdr = frag_hdr->next_hdr;
647  }
648  else
649  {
650  ip->protocol = frag_hdr->next_hdr;
651  }
652  if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
653  {
655  goto free_buffers_and_return;
656  }
657  memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
658  first_b->current_length - ip6_frag_hdr_offset -
659  sizeof (ip6_frag_hdr_t));
660  first_b->current_length -= sizeof (*frag_hdr);
661  ip->payload_length =
662  clib_host_to_net_u16 (total_length + first_b->current_length -
663  sizeof (*ip));
664  if (!vlib_buffer_chain_linearize (vm, first_b))
665  {
666  rv = IP6_REASS_RC_NO_BUF;
667  goto free_buffers_and_return;
668  }
669  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
670  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
671  {
672  ip6_reass_add_trace (vm, node, rm, reass, reass->first_bi, FINALIZE, 0);
673 #if 0
674  // following code does a hexdump of packet fragments to stdout ...
675  do
676  {
677  u32 bi = reass->first_bi;
678  u8 *s = NULL;
679  while (~0 != bi)
680  {
681  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
682  s = format (s, "%u: %U\n", bi, format_hexdump,
684  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
685  {
686  bi = b->next_buffer;
687  }
688  else
689  {
690  break;
691  }
692  }
693  printf ("%.*s\n", vec_len (s), s);
694  fflush (stdout);
695  vec_free (s);
696  }
697  while (0);
698 #endif
699  }
700  if (!is_custom_app)
701  {
702  *next0 = IP6_REASSEMBLY_NEXT_INPUT;
703  }
704  else
705  {
706  *next0 = reass->next_index;
707  }
708  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
709  ip6_reass_free (rm, rt, reass);
710  reass = NULL;
711 free_buffers_and_return:
712  vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
713  vec_free (vec_drop_compress);
714  return rv;
715 }
716 
717 always_inline void
718 ip6_reass_insert_range_in_chain (vlib_main_t * vm, ip6_reass_main_t * rm,
719  ip6_reass_per_thread_t * rt,
720  ip6_reass_t * reass, u32 prev_range_bi,
721  u32 new_next_bi)
722 {
723 
724  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
725  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
726  if (~0 != prev_range_bi)
727  {
728  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
729  vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
730  new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
731  prev_vnb->ip.reass.next_range_bi = new_next_bi;
732  }
733  else
734  {
735  if (~0 != reass->first_bi)
736  {
737  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
738  }
739  reass->first_bi = new_next_bi;
740  }
741  reass->data_len += ip6_reass_buffer_get_data_len (new_next_b);
742 }
743 
744 always_inline ip6_reass_rc_t
745 ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
746  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
747  ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
748  ip6_frag_hdr_t * frag_hdr, bool is_custom_app)
749 {
750  int consumed = 0;
751  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
752  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
753  if (is_custom_app)
754  {
755  reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
756  reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
757  }
758 
759  fvnb->ip.reass.ip6_frag_hdr_offset =
760  (u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
761  ip6_header_t *fip = vlib_buffer_get_current (fb);
762  if (fb->current_length < sizeof (*fip) ||
763  fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||
764  fvnb->ip.reass.ip6_frag_hdr_offset >= fb->current_length)
765  {
766  return IP6_REASS_RC_INTERNAL_ERROR;
767  }
768 
769  u32 fragment_first = fvnb->ip.reass.fragment_first =
770  ip6_frag_hdr_offset_bytes (frag_hdr);
771  u32 fragment_length =
772  vlib_buffer_length_in_chain (vm, fb) -
773  (fvnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
774  u32 fragment_last = fvnb->ip.reass.fragment_last =
775  fragment_first + fragment_length - 1;
776  int more_fragments = ip6_frag_hdr_more (frag_hdr);
777  u32 candidate_range_bi = reass->first_bi;
778  u32 prev_range_bi = ~0;
779  fvnb->ip.reass.range_first = fragment_first;
780  fvnb->ip.reass.range_last = fragment_last;
781  fvnb->ip.reass.next_range_bi = ~0;
782  if (!more_fragments)
783  {
784  reass->last_packet_octet = fragment_last;
785  }
786  if (~0 == reass->first_bi)
787  {
788  // starting a new reassembly
789  ip6_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
790  *bi0);
791  reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
792  consumed = 1;
793  reass->fragments_n = 1;
794  goto check_if_done_maybe;
795  }
796  reass->min_fragment_length =
797  clib_min (clib_net_to_host_u16 (fip->payload_length),
798  fvnb->ip.reass.estimated_mtu);
799  while (~0 != candidate_range_bi)
800  {
801  vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
802  vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
803  if (fragment_first > candidate_vnb->ip.reass.range_last)
804  {
805  // this fragment starts after the candidate range
806  prev_range_bi = candidate_range_bi;
807  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
808  if (candidate_vnb->ip.reass.range_last < fragment_last &&
809  ~0 == candidate_range_bi)
810  {
811  // special case - this fragment falls beyond all known ranges
812  ip6_reass_insert_range_in_chain (vm, rm, rt, reass,
813  prev_range_bi, *bi0);
814  consumed = 1;
815  break;
816  }
817  continue;
818  }
819  if (fragment_last < candidate_vnb->ip.reass.range_first)
820  {
821  // this fragment ends before candidate range without any overlap
822  ip6_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
823  *bi0);
824  consumed = 1;
825  }
826  else if (fragment_first == candidate_vnb->ip.reass.range_first &&
827  fragment_last == candidate_vnb->ip.reass.range_last)
828  {
829  // duplicate fragment - ignore
830  }
831  else
832  {
833  // overlapping fragment - not allowed by RFC 8200
834  ip6_reass_drop_all (vm, node, rm, reass);
835  ip6_reass_free (rm, rt, reass);
836  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
837  {
838  ip6_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_OVERLAP,
839  0);
840  }
841  *next0 = IP6_REASSEMBLY_NEXT_DROP;
842  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
843  return IP6_REASS_RC_OK;
844  }
845  break;
846  }
847  ++reass->fragments_n;
848 check_if_done_maybe:
849  if (consumed)
850  {
851  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
852  {
853  ip6_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0);
854  }
855  }
856  if (~0 != reass->last_packet_octet &&
857  reass->data_len == reass->last_packet_octet + 1)
858  {
859  return ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
860  is_custom_app);
861  }
862  else
863  {
864  if (consumed)
865  {
866  *bi0 = ~0;
867  if (reass->fragments_n > rm->max_reass_len)
868  {
869  return IP6_REASS_RC_TOO_MANY_FRAGMENTS;
870  }
871  }
872  else
873  {
874  *next0 = IP6_REASSEMBLY_NEXT_DROP;
875  *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
876  }
877  }
878  return IP6_REASS_RC_OK;
879 }
880 
881 always_inline bool
882 ip6_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
883  vlib_buffer_t * b,
884  ip6_frag_hdr_t * frag_hdr)
885 {
886  ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;
887  while (ip6_ext_hdr (tmp->next_hdr))
888  {
889  tmp = ip6_ext_next_header (tmp);
890  }
891  if (IP_PROTOCOL_IP6_NONXT == tmp->next_hdr)
892  {
893  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
894  ICMP6_parameter_problem_first_fragment_has_incomplete_header_chain,
895  0);
896  b->error = node->errors[IP6_ERROR_REASS_MISSING_UPPER];
897 
898  return false;
899  }
900  return true;
901 }
902 
903 always_inline bool
904 ip6_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
905  vlib_node_runtime_t * node,
906  vlib_buffer_t * b,
907  ip6_frag_hdr_t * frag_hdr)
908 {
909  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
910  ip6_header_t *ip = vlib_buffer_get_current (b);
911  int more_fragments = ip6_frag_hdr_more (frag_hdr);
912  u32 fragment_length =
913  vlib_buffer_length_in_chain (vm, b) -
914  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
915  if (more_fragments && 0 != fragment_length % 8)
916  {
917  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
918  ICMP6_parameter_problem_erroneous_header_field,
919  (u8 *) & ip->payload_length - (u8 *) ip);
920  return false;
921  }
922  return true;
923 }
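/* Annotation (not part of the original source), worked example: a non-final
 * fragment carrying 1401 bytes past the fragment header fails the check
 * above (1401 % 8 == 1); every fragment except the last must cover a
 * multiple of 8 octets, since fragment offsets are expressed in 8-octet
 * units. */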
924 
925 always_inline bool
926 ip6_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
927  vlib_node_runtime_t * node,
928  vlib_buffer_t * b,
929  ip6_frag_hdr_t * frag_hdr)
930 {
931  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
932  u32 fragment_first = ip6_frag_hdr_offset_bytes (frag_hdr);
933  u32 fragment_length =
934  vlib_buffer_length_in_chain (vm, b) -
935  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
936  if (fragment_first + fragment_length > 65535)
937  {
938  ip6_header_t *ip0 = vlib_buffer_get_current (b);
939  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
940  ICMP6_parameter_problem_erroneous_header_field,
941  (u8 *) & frag_hdr->fragment_offset_and_more
942  - (u8 *) ip0);
943  return false;
944  }
945  return true;
946 }
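/* Annotation (not part of the original source), worked example: a fragment
 * at offset 65528 (8191 * 8) carrying 8 bytes would reassemble to 65536
 * bytes, exceeding the 65535-byte limit, so the check above rejects it with
 * an ICMPv6 parameter problem. */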
947 
948 always_inline uword
949 ip6_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
950  vlib_frame_t * frame, bool is_feature,
951  bool is_custom_app)
952 {
953  u32 *from = vlib_frame_vector_args (frame);
954  u32 n_left_from, n_left_to_next, *to_next, next_index;
955  ip6_reass_main_t *rm = &ip6_reass_main;
956  ip6_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
957  clib_spinlock_lock (&rt->lock);
958 
959  n_left_from = frame->n_vectors;
960  next_index = node->cached_next_index;
961  while (n_left_from > 0)
962  {
963  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
964 
965  while (n_left_from > 0 && n_left_to_next > 0)
966  {
967  u32 bi0;
968  vlib_buffer_t *b0;
969  u32 next0;
970  u32 error0 = IP6_ERROR_NONE;
971  u32 icmp_bi = ~0;
972 
973  bi0 = from[0];
974  b0 = vlib_get_buffer (vm, bi0);
975 
976  ip6_header_t *ip0 = vlib_buffer_get_current (b0);
977  ip6_frag_hdr_t *frag_hdr = NULL;
978  ip6_ext_header_t *prev_hdr;
979  if (ip6_ext_hdr (ip0->protocol))
980  {
981  ip6_ext_header_find_t (ip0, prev_hdr, frag_hdr,
982  IP_PROTOCOL_IPV6_FRAGMENTATION);
983  }
984  if (!frag_hdr)
985  {
986  // this is a regular packet - no fragmentation
987  next0 = IP6_REASSEMBLY_NEXT_INPUT;
988  goto skip_reass;
989  }
990  if (0 == ip6_frag_hdr_offset (frag_hdr))
991  {
992  // first fragment - verify upper-layer is present
993  if (!ip6_reass_verify_upper_layer_present (node, b0, frag_hdr))
994  {
995  next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
996  goto skip_reass;
997  }
998  }
999  if (!ip6_reass_verify_fragment_multiple_8 (vm, node, b0, frag_hdr)
1000  || !ip6_reass_verify_packet_size_lt_64k (vm, node, b0,
1001  frag_hdr))
1002  {
1003  next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
1004  goto skip_reass;
1005  }
1006  vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
1007  (u8 *) frag_hdr - (u8 *) ip0;
1008 
1009  ip6_reass_kv_t kv;
1010  u8 do_handoff = 0;
1011 
1012  kv.k.as_u64[0] = ip0->src_address.as_u64[0];
1013  kv.k.as_u64[1] = ip0->src_address.as_u64[1];
1014  kv.k.as_u64[2] = ip0->dst_address.as_u64[0];
1015  kv.k.as_u64[3] = ip0->dst_address.as_u64[1];
1016  kv.k.as_u64[4] =
1017  ((u64) vec_elt (ip6_main.fib_index_by_sw_if_index,
1018  vnet_buffer (b0)->sw_if_index[VLIB_RX])) << 32 |
1019  (u64) frag_hdr->identification;
1020  kv.k.as_u64[5] = ip0->protocol;
1021 
1022  ip6_reass_t *reass =
1023  ip6_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
1024  &do_handoff);
1025 
1026  if (PREDICT_FALSE (do_handoff))
1027  {
1028  next0 = IP6_REASSEMBLY_NEXT_HANDOFF;
1029  if (is_feature)
1030  vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
1031  kv.v.thread_index;
1032  else
1033  vnet_buffer (b0)->ip.reass.owner_thread_index =
1034  kv.v.thread_index;
1035  }
1036  else if (reass)
1037  {
1038  switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
1039  &error0, frag_hdr, is_custom_app))
1040  {
1041  case IP6_REASS_RC_OK:
1042  /* nothing to do here */
1043  break;
1044  case IP6_REASS_RC_TOO_MANY_FRAGMENTS:
1045  vlib_node_increment_counter (vm, node->node_index,
1046  IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1047  1);
1048  ip6_reass_drop_all (vm, node, rm, reass);
1049  ip6_reass_free (rm, rt, reass);
1050  goto next_packet;
1051  break;
1052  case IP6_REASS_RC_NO_BUF:
1053  vlib_node_increment_counter (vm, node->node_index,
1054  IP6_ERROR_REASS_NO_BUF, 1);
1055  ip6_reass_drop_all (vm, node, rm, reass);
1056  ip6_reass_free (rm, rt, reass);
1057  goto next_packet;
1058  break;
1059  case IP6_REASS_RC_INTERNAL_ERROR:
1060  /* drop everything and start with a clean slate */
1061  vlib_node_increment_counter (vm, node->node_index,
1062  IP6_ERROR_REASS_INTERNAL_ERROR,
1063  1);
1064  ip6_reass_drop_all (vm, node, rm, reass);
1065  ip6_reass_free (rm, rt, reass);
1066  goto next_packet;
1067  break;
1068  }
1069  }
1070  else
1071  {
1072  if (is_feature)
1073  {
1074  next0 = IP6_REASSEMBLY_NEXT_DROP;
1075  }
1076  else
1077  {
1078  vnet_buffer_opaque_t *fvnb = vnet_buffer (b0);
1079  next0 = fvnb->ip.reass.error_next_index;
1080  }
1081  error0 = IP6_ERROR_REASS_LIMIT_REACHED;
1082  }
1083 
1084  b0->error = node->errors[error0];
1085 
1086  if (~0 != bi0)
1087  {
1088  skip_reass:
1089  to_next[0] = bi0;
1090  to_next += 1;
1091  n_left_to_next -= 1;
1092  if (is_feature && IP6_ERROR_NONE == error0)
1093  {
1094  b0 = vlib_get_buffer (vm, bi0);
1095  vnet_feature_next (&next0, b0);
1096  }
1097  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1098  n_left_to_next, bi0, next0);
1099  }
1100 
1101  if (~0 != icmp_bi)
1102  {
1103  next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
1104  to_next[0] = icmp_bi;
1105  to_next += 1;
1106  n_left_to_next -= 1;
1107  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1108  n_left_to_next, icmp_bi,
1109  next0);
1110  }
1111  next_packet:
1112  from += 1;
1113  n_left_from -= 1;
1114  }
1115 
1116  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1117  }
1118 
1119  clib_spinlock_unlock (&rt->lock);
1120  return frame->n_vectors;
1121 }
1122 
1123 static char *ip6_reassembly_error_strings[] = {
1124 #define _(sym, string) string,
1125  foreach_ip6_error
1126 #undef _
1127 };
1128 
1129 VLIB_NODE_FN (ip6_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
1130  vlib_frame_t * frame)
1131 {
1132  return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ ,
1133  false /* is_custom_app */ );
1134 }
1135 
1136 /* *INDENT-OFF* */
1137 VLIB_REGISTER_NODE (ip6_reass_node) = {
1138  .name = "ip6-reassembly",
1139  .vector_size = sizeof (u32),
1140  .format_trace = format_ip6_reass_trace,
1141  .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
1142  .error_strings = ip6_reassembly_error_strings,
1143  .n_next_nodes = IP6_REASSEMBLY_N_NEXT,
1144  .next_nodes =
1145  {
1146  [IP6_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1147  [IP6_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1148  [IP6_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1149  [IP6_REASSEMBLY_NEXT_HANDOFF] = "ip6-reassembly-handoff",
1150  },
1151 };
1152 /* *INDENT-ON* */
1153 
1154 VLIB_NODE_FN (ip6_reass_node_feature) (vlib_main_t * vm,
1155  vlib_node_runtime_t * node,
1156  vlib_frame_t * frame)
1157 {
1158  return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ ,
1159  false /* is_custom_app */ );
1160 }
1161 
1162 /* *INDENT-OFF* */
1163 VLIB_REGISTER_NODE (ip6_reass_node_feature) = {
1164  .name = "ip6-reassembly-feature",
1165  .vector_size = sizeof (u32),
1166  .format_trace = format_ip6_reass_trace,
1167  .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
1168  .error_strings = ip6_reassembly_error_strings,
1169  .n_next_nodes = IP6_REASSEMBLY_N_NEXT,
1170  .next_nodes =
1171  {
1172  [IP6_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1173  [IP6_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1174  [IP6_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1175  [IP6_REASSEMBLY_NEXT_HANDOFF] = "ip6-reass-feature-hoff",
1176  },
1177 };
1178 /* *INDENT-ON* */
1179 
1180 /* *INDENT-OFF* */
1181 VNET_FEATURE_INIT (ip6_reassembly_feature, static) = {
1182  .arc_name = "ip6-unicast",
1183  .node_name = "ip6-reassembly-feature",
1184  .runs_before = VNET_FEATURES ("ip6-lookup",
1185  "ipsec6-input-feature"),
1186  .runs_after = 0,
1187 };
1188 /* *INDENT-ON* */
1189 
1190 #ifndef CLIB_MARCH_VARIANT
1191 static u32
1192 ip6_reass_get_nbuckets ()
1193 {
1194  ip6_reass_main_t *rm = &ip6_reass_main;
1195  u32 nbuckets;
1196  u8 i;
1197 
1198  nbuckets = (u32) (rm->max_reass_n / IP6_REASS_HT_LOAD_FACTOR);
1199 
1200  for (i = 0; i < 31; i++)
1201  if ((1 << i) >= nbuckets)
1202  break;
1203  nbuckets = 1 << i;
1204 
1205  return nbuckets;
1206 }
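/* Annotation (not part of the original source), worked example: with the
 * default of 1024 reassemblies and the 0.75 load factor this computes
 * 1024 / 0.75 = 1365, which is rounded up to the next power of two, i.e.
 * 2048 buckets. */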
1207 #endif /* CLIB_MARCH_VARIANT */
1208 
1209 typedef enum
1210 {
1211  IP6_EVENT_CONFIG_CHANGED = 1,
1212 } ip6_reass_event_t;
1213 
1214 #ifndef CLIB_MARCH_VARIANT
1215 typedef struct
1216 {
1217  int failure;
1218  clib_bihash_48_8_t *new_hash;
1219 } ip6_rehash_cb_ctx;
1220 
1221 static void
1222 ip6_rehash_cb (clib_bihash_kv_48_8_t * kv, void *_ctx)
1223 {
1224  ip6_rehash_cb_ctx *ctx = _ctx;
1225  if (clib_bihash_add_del_48_8 (ctx->new_hash, kv, 1))
1226  {
1227  ctx->failure = 1;
1228  }
1229 }
1230 
1231 static void
1232 ip6_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1233  u32 max_reassembly_length, u32 expire_walk_interval_ms)
1234 {
1235  ip6_reass_main.timeout_ms = timeout_ms;
1236  ip6_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1237  ip6_reass_main.max_reass_n = max_reassemblies;
1238  ip6_reass_main.max_reass_len = max_reassembly_length;
1239  ip6_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1240 }
1241 
1242 vnet_api_error_t
1243 ip6_reass_set (u32 timeout_ms, u32 max_reassemblies,
1244  u32 max_reassembly_length, u32 expire_walk_interval_ms)
1245 {
1246  u32 old_nbuckets = ip6_reass_get_nbuckets ();
1247  ip6_reass_set_params (timeout_ms, max_reassemblies, max_reassembly_length,
1248  expire_walk_interval_ms);
1249  vlib_process_signal_event (ip6_reass_main.vlib_main,
1250  ip6_reass_main.ip6_reass_expire_node_idx,
1251  IP6_EVENT_CONFIG_CHANGED, 0);
1252  u32 new_nbuckets = ip6_reass_get_nbuckets ();
1253  if (ip6_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
1254  {
1255  clib_bihash_48_8_t new_hash;
1256  clib_memset (&new_hash, 0, sizeof (new_hash));
1257  ip6_rehash_cb_ctx ctx;
1258  ctx.failure = 0;
1259  ctx.new_hash = &new_hash;
1260  clib_bihash_init_48_8 (&new_hash, "ip6-reass", new_nbuckets,
1261  new_nbuckets * 1024);
1262  clib_bihash_foreach_key_value_pair_48_8 (&ip6_reass_main.hash,
1263  ip6_rehash_cb, &ctx);
1264  if (ctx.failure)
1265  {
1266  clib_bihash_free_48_8 (&new_hash);
1267  return -1;
1268  }
1269  else
1270  {
1271  clib_bihash_free_48_8 (&ip6_reass_main.hash);
1272  clib_memcpy_fast (&ip6_reass_main.hash, &new_hash,
1273  sizeof (ip6_reass_main.hash));
1274  clib_bihash_copied (&ip6_reass_main.hash, &new_hash);
1275  }
1276  }
1277  return 0;
1278 }
1279 
1280 vnet_api_error_t
1281 ip6_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1282  u32 * expire_walk_interval_ms)
1283 {
1284  *timeout_ms = ip6_reass_main.timeout_ms;
1285  *max_reassemblies = ip6_reass_main.max_reass_n;
1286  *expire_walk_interval_ms = ip6_reass_main.expire_walk_interval_ms;
1287  return 0;
1288 }
1289 
1290 static clib_error_t *
1291 ip6_reass_init_function (vlib_main_t * vm)
1292 {
1293  ip6_reass_main_t *rm = &ip6_reass_main;
1294  clib_error_t *error = 0;
1295  u32 nbuckets;
1296  vlib_node_t *node;
1297 
1298  rm->vlib_main = vm;
1299  rm->vnet_main = vnet_get_main ();
1300 
1301  vec_validate (rm->per_thread_data, vlib_num_workers ());
1302  ip6_reass_per_thread_t *rt;
1303  vec_foreach (rt, rm->per_thread_data)
1304  {
1305  clib_spinlock_init (&rt->lock);
1306  pool_alloc (rt->pool, rm->max_reass_n);
1307  }
1308 
1309  node = vlib_get_node_by_name (vm, (u8 *) "ip6-reassembly-expire-walk");
1310  ASSERT (node);
1311  rm->ip6_reass_expire_node_idx = node->index;
1312 
1313  ip6_reass_set_params (IP6_REASS_TIMEOUT_DEFAULT_MS,
1314  IP6_REASS_MAX_REASSEMBLIES_DEFAULT,
1315  IP6_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1316  IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
1317 
1318  nbuckets = ip6_reass_get_nbuckets ();
1319  clib_bihash_init_48_8 (&rm->hash, "ip6-reass", nbuckets, nbuckets * 1024);
1320 
1321  node = vlib_get_node_by_name (vm, (u8 *) "ip6-drop");
1322  ASSERT (node);
1323  rm->ip6_drop_idx = node->index;
1324  node = vlib_get_node_by_name (vm, (u8 *) "ip6-icmp-error");
1325  ASSERT (node);
1326  rm->ip6_icmp_error_idx = node->index;
1327 
1328  if ((error = vlib_call_init_function (vm, ip_main_init)))
1329  return error;
1330  ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
1331  ip6_reass_node.index);
1332 
1333  rm->fq_index = vlib_frame_queue_main_init (ip6_reassembly_handoff_node.index, 0);
1334  rm->fq_feature_index =
1335  vlib_frame_queue_main_init (ip6_reassembly_feature_handoff_node.index, 0);
1336 
1337  return error;
1338 }
1339 
1340 VLIB_INIT_FUNCTION (ip6_reass_init_function);
1341 #endif /* CLIB_MARCH_VARIANT */
1342 
1343 static uword
1344 ip6_reass_walk_expired (vlib_main_t * vm,
1345  vlib_node_runtime_t * node, vlib_frame_t * f)
1346 {
1347  ip6_reass_main_t *rm = &ip6_reass_main;
1348  uword event_type, *event_data = 0;
1349 
1350  while (true)
1351  {
1352  vlib_process_wait_for_event_or_clock (vm,
1353  (f64) rm->expire_walk_interval_ms
1354  / (f64) MSEC_PER_SEC);
1355  event_type = vlib_process_get_events (vm, &event_data);
1356 
1357  switch (event_type)
1358  {
1359  case ~0: /* no events => timeout */
1360  /* nothing to do here */
1361  break;
1362  case IP6_EVENT_CONFIG_CHANGED:
1363  break;
1364  default:
1365  clib_warning ("BUG: event type 0x%wx", event_type);
1366  break;
1367  }
1368  f64 now = vlib_time_now (vm);
1369 
1370  ip6_reass_t *reass;
1371  int *pool_indexes_to_free = NULL;
1372 
1373  uword thread_index = 0;
1374  int index;
1375  const uword nthreads = vlib_num_workers () + 1;
1376  u32 *vec_icmp_bi = NULL;
1377  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1378  {
1379  ip6_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1380  clib_spinlock_lock (&rt->lock);
1381 
1382  vec_reset_length (pool_indexes_to_free);
1383  /* *INDENT-OFF* */
1384  pool_foreach_index (index, rt->pool, ({
1385  reass = pool_elt_at_index (rt->pool, index);
1386  if (now > reass->last_heard + rm->timeout)
1387  {
1388  vec_add1 (pool_indexes_to_free, index);
1389  }
1390  }));
1391  /* *INDENT-ON* */
1392  int *i;
1393  /* *INDENT-OFF* */
1394  vec_foreach (i, pool_indexes_to_free)
1395  {
1396  ip6_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1397  u32 icmp_bi = ~0;
1398  ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
1399  if (~0 != icmp_bi)
1400  vec_add1 (vec_icmp_bi, icmp_bi);
1401 
1402  ip6_reass_free (rm, rt, reass);
1403  }
1404  /* *INDENT-ON* */
1405 
1406  clib_spinlock_unlock (&rt->lock);
1407  }
1408 
1409  while (vec_len (vec_icmp_bi) > 0)
1410  {
1411  vlib_frame_t *f =
1412  vlib_get_frame_to_node (vm, rm->ip6_icmp_error_idx);
1413  u32 *to_next = vlib_frame_vector_args (f);
1414  u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
1415  int trace_frame = 0;
1416  while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)
1417  {
1418  u32 bi = vec_pop (vec_icmp_bi);
1419  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1420  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
1421  trace_frame = 1;
1422  b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
1423  to_next[0] = bi;
1424  ++f->n_vectors;
1425  to_next += 1;
1426  n_left_to_next -= 1;
1427  }
1428  f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
1429  vlib_put_frame_to_node (vm, rm->ip6_icmp_error_idx, f);
1430  }
1431 
1432  vec_free (pool_indexes_to_free);
1433  vec_free (vec_icmp_bi);
1434  if (event_data)
1435  {
1436  _vec_len (event_data) = 0;
1437  }
1438  }
1439 
1440  return 0;
1441 }
1442 
1443 /* *INDENT-OFF* */
1444 VLIB_REGISTER_NODE (ip6_reass_expire_node) = {
1445  .function = ip6_reass_walk_expired,
1446  .format_trace = format_ip6_reass_trace,
1447  .type = VLIB_NODE_TYPE_PROCESS,
1448  .name = "ip6-reassembly-expire-walk",
1449 
1450  .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
1451  .error_strings = ip6_reassembly_error_strings,
1452 
1453 };
1454 /* *INDENT-ON* */
1455 
1456 static u8 *
1457 format_ip6_reass_key (u8 * s, va_list * args)
1458 {
1459  ip6_reass_key_t *key = va_arg (*args, ip6_reass_key_t *);
1460  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1461  key->xx_id, format_ip6_address, &key->src, format_ip6_address,
1462  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1463  return s;
1464 }
1465 
1466 static u8 *
1467 format_ip6_reass (u8 * s, va_list * args)
1468 {
1469  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1470  ip6_reass_t *reass = va_arg (*args, ip6_reass_t *);
1471 
1472  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
1473  "last_packet_octet: %u, trace_op_counter: %u\n",
1474  reass->id, format_ip6_reass_key, &reass->key, reass->first_bi,
1475  reass->data_len, reass->last_packet_octet,
1476  reass->trace_op_counter);
1477  u32 bi = reass->first_bi;
1478  u32 counter = 0;
1479  while (~0 != bi)
1480  {
1481  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1482  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1483  s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1484  "fragment[%u, %u]\n",
1485  counter, vnb->ip.reass.range_first,
1486  vnb->ip.reass.range_last, bi,
1487  ip6_reass_buffer_get_data_offset (b),
1488  ip6_reass_buffer_get_data_len (b),
1489  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1490  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1491  {
1492  bi = b->next_buffer;
1493  }
1494  else
1495  {
1496  bi = ~0;
1497  }
1498  }
1499  return s;
1500 }
1501 
1502 static clib_error_t *
1503 show_ip6_reass (vlib_main_t * vm, unformat_input_t * input,
1504  CLIB_UNUSED (vlib_cli_command_t * lmd))
1505 {
1506  ip6_reass_main_t *rm = &ip6_reass_main;
1507 
1508  vlib_cli_output (vm, "---------------------");
1509  vlib_cli_output (vm, "IP6 reassembly status");
1510  vlib_cli_output (vm, "---------------------");
1511  bool details = false;
1512  if (unformat (input, "details"))
1513  {
1514  details = true;
1515  }
1516 
1517  u32 sum_reass_n = 0;
1518  u64 sum_buffers_n = 0;
1519  ip6_reass_t *reass;
1520  uword thread_index;
1521  const uword nthreads = vlib_num_workers () + 1;
1522  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1523  {
1524  ip6_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1525  clib_spinlock_lock (&rt->lock);
1526  if (details)
1527  {
1528  /* *INDENT-OFF* */
1529  pool_foreach (reass, rt->pool, {
1530  vlib_cli_output (vm, "%U", format_ip6_reass, vm, reass);
1531  });
1532  /* *INDENT-ON* */
1533  }
1534  sum_reass_n += rt->reass_n;
1535  clib_spinlock_unlock (&rt->lock);
1536  }
1537  vlib_cli_output (vm, "---------------------");
1538  vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
1539  (long unsigned) sum_reass_n);
1540  vlib_cli_output (vm, "Maximum configured concurrent IP6 reassemblies per "
1541  "worker-thread: %lu\n", (long unsigned) rm->max_reass_n);
1542  vlib_cli_output (vm, "Buffers in use: %lu\n",
1543  (long unsigned) sum_buffers_n);
1544  return 0;
1545 }
1546 
1547 /* *INDENT-OFF* */
1548 VLIB_CLI_COMMAND (show_ip6_reassembly_cmd, static) = {
1549  .path = "show ip6-reassembly",
1550  .short_help = "show ip6-reassembly [details]",
1551  .function = show_ip6_reass,
1552 };
1553 /* *INDENT-ON* */
1554 
1555 #ifndef CLIB_MARCH_VARIANT
1556 vnet_api_error_t
1557 ip6_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
1558 {
1559  return vnet_feature_enable_disable ("ip6-unicast", "ip6-reassembly-feature",
1560  sw_if_index, enable_disable, 0, 0);
1561 }
1562 #endif /* CLIB_MARCH_VARIANT */
1563 
1564 #define foreach_ip6_reassembly_handoff_error \
1565 _(CONGESTION_DROP, "congestion drop")
1566 
1567 
1568 typedef enum
1569 {
1570 #define _(sym,str) IP6_REASSEMBLY_HANDOFF_ERROR_##sym,
1571  foreach_ip6_reassembly_handoff_error
1572 #undef _
1573  IP6_REASSEMBLY_HANDOFF_N_ERROR,
1574 } ip6_reassembly_handoff_error_t;
1575 
1576 static char *ip6_reassembly_handoff_error_strings[] = {
1577 #define _(sym,string) string,
1578  foreach_ip6_reassembly_handoff_error
1579 #undef _
1580 };
1581 
1582 typedef struct
1583 {
1584  u32 next_worker_index;
1585 } ip6_reassembly_handoff_trace_t;
1586 
1587 static u8 *
1588 format_ip6_reassembly_handoff_trace (u8 * s, va_list * args)
1589 {
1590  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1591  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1592  ip6_reassembly_handoff_trace_t *t =
1593  va_arg (*args, ip6_reassembly_handoff_trace_t *);
1594 
1595  s =
1596  format (s, "ip6-reassembly-handoff: next-worker %d",
1597  t->next_worker_index);
1598 
1599  return s;
1600 }
1601 
1602 always_inline uword
1603 ip6_reassembly_handoff_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1604  vlib_frame_t * frame, bool is_feature)
1605 {
1606  ip6_reass_main_t *rm = &ip6_reass_main;
1607 
1608  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1609  u32 n_enq, n_left_from, *from;
1610  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1611  u32 fq_index;
1612 
1613  from = vlib_frame_vector_args (frame);
1614  n_left_from = frame->n_vectors;
1615  vlib_get_buffers (vm, from, bufs, n_left_from);
1616 
1617  b = bufs;
1618  ti = thread_indices;
1619 
1620  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1621 
1622  while (n_left_from > 0)
1623  {
1624  ti[0] =
1625  (is_feature) ? vnet_buffer (b[0])->ip.
1626  reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
1627  reass.owner_thread_index;
1628 
1629  if (PREDICT_FALSE
1630  ((node->flags & VLIB_NODE_FLAG_TRACE)
1631  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1632  {
1633  ip6_reassembly_handoff_trace_t *t =
1634  vlib_add_trace (vm, node, b[0], sizeof (*t));
1635  t->next_worker_index = ti[0];
1636  }
1637 
1638  n_left_from -= 1;
1639  ti += 1;
1640  b += 1;
1641  }
1642  n_enq =
1643  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1644  frame->n_vectors, 1);
1645 
1646  if (n_enq < frame->n_vectors)
1647  vlib_node_increment_counter (vm, node->node_index,
1648  IP6_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1649  frame->n_vectors - n_enq);
1650  return frame->n_vectors;
1651 }
1652 
1653 VLIB_NODE_FN (ip6_reassembly_handoff_node) (vlib_main_t * vm,
1654  vlib_node_runtime_t * node,
1655  vlib_frame_t * frame)
1656 {
1657  return ip6_reassembly_handoff_inline (vm, node, frame,
1658  false /* is_feature */ );
1659 }
1660 
1661 /* *INDENT-OFF* */
1662 VLIB_REGISTER_NODE (ip6_reassembly_handoff_node) = {
1663  .name = "ip6-reassembly-handoff",
1664  .vector_size = sizeof (u32),
1665  .n_errors = ARRAY_LEN(ip6_reassembly_handoff_error_strings),
1666  .error_strings = ip6_reassembly_handoff_error_strings,
1667  .format_trace = format_ip6_reassembly_handoff_trace,
1668 
1669  .n_next_nodes = 1,
1670 
1671  .next_nodes = {
1672  [0] = "error-drop",
1673  },
1674 };
1675 
1676 
1677 VLIB_NODE_FN (ip6_reassembly_feature_handoff_node) (vlib_main_t * vm,
1678  vlib_node_runtime_t * node, vlib_frame_t * frame)
1679 {
1680  return ip6_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
1681 }
1682 
1683 
1684 /* *INDENT-OFF* */
1685 VLIB_REGISTER_NODE (ip6_reassembly_feature_handoff_node) = {
1686  .name = "ip6-reass-feature-hoff",
1687  .vector_size = sizeof (u32),
1688  .n_errors = ARRAY_LEN(ip6_reassembly_handoff_error_strings),
1689  .error_strings = ip6_reassembly_handoff_error_strings,
1690  .format_trace = format_ip6_reassembly_handoff_trace,
1691 
1692  .n_next_nodes = 1,
1693 
1694  .next_nodes = {
1695  [0] = "error-drop",
1696  },
1697 };
1698 /* *INDENT-ON* */
1699 
1700 /*
1701  * fd.io coding-style-patch-verification: ON
1702  *
1703  * Local Variables:
1704  * eval: (c-set-style "gnu")
1705  * End:
1706  */