FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
ip6_map_t.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ip/ip6_to_ip4.h>
#include <vnet/ip/ip_frag.h>

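/*
 * Overview: this file implements the IPv6-to-IPv4 direction of MAP-T as a
 * set of VPP graph nodes.  "ip6-map-t" classifies incoming IPv6 packets and
 * dispatches them to "ip6-map-t-tcp-udp", "ip6-map-t-icmp" or
 * "ip6-map-t-fragmented", which perform the actual header translation.
 * Each translator then hands the resulting IPv4 packet to ip4-lookup,
 * directly to ip4-load-balance when ip6_map_ip4_lookup_bypass() succeeds,
 * or to the IPv4 fragmentation node when the packet exceeds the domain MTU.
 */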
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;

typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_REWRITE,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;

typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;

typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;

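/*
 * Non-first IPv6 fragments carry no L4 header, so the destination port
 * cannot be recovered from them directly.  The two helpers below share a
 * map_ip4_reass_t entry keyed on the translated IPv4 source/destination,
 * the 16-bit fragment id and the protocol: the first fragment caches the
 * port, subsequent fragments look it up.
 */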
static_always_inline int
ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
                        map_domain_t * d, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (map_get_ip4 (&ip6->src_address, d->ip6_src_len),
                       ip6_map_t_embedded_address (d, &ip6->dst_address),
                       frag_id_6to4 (frag->identification),
                       (ip6->protocol ==
                        IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
                       &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock ();
  return !r;
}

/* Returns the associated port or -1 */
static_always_inline i32
ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
                      map_domain_t * d)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (map_get_ip4 (&ip6->src_address, d->ip6_src_len),
                       ip6_map_t_embedded_address (d, &ip6->dst_address),
                       frag_id_6to4 (frag->identification),
                       (ip6->protocol ==
                        IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
                       &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock ();
  return ret;
}

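/*
 * Context passed to the icmp6_to_icmp() address-rewrite callbacks below.
 * Both callbacks enforce the MAP security check: the IPv6 address being
 * translated must equal the prefix/suffix that the domain derives from the
 * embedded IPv4 address and the sender port (map_get_pfx_net() /
 * map_get_sfx_net()), otherwise the packet is rejected.
 */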
typedef struct
{
  map_domain_t *d;
  u16 sender_port;
} icmp6_to_icmp_ctx_t;

static int
ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 ip4_sadr;

  // Security check
  // Note that this prevents an intermediate IPv6 router from answering
  // the request.
  ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->ip6_src_len);
  if (ip6->src_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
      || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
                                                        ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
  ip4->src_address.as_u32 = ip4_sadr;

  return 0;
}

static int
ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
                              void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 inner_ip4_dadr;

  // Security check of inner packet
  inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->ip6_src_len);
  if (ip6->dst_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
      || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
                                                        inner_ip4_dadr,
                                                        ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 = inner_ip4_dadr;
  ip4->src_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->src_address);

  return 0;
}

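/*
 * Per-packet ICMPv6 translation node.  The "port" used for the security
 * check is the ICMP identifier extracted by ip6_get_port(); a zero value is
 * only tolerated for 1:1 domains (ea_bits_len == 0 with rules present).
 * Successfully translated packets update the per-domain combined counters
 * and may be diverted to the IPv4 fragmentation node when they exceed the
 * domain MTU.
 */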
static uword
ip6_map_t_icmp (vlib_main_t * vm,
                vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          u8 error0;
          ip6_mapt_icmp_next_t next0;
          map_domain_t *d0;
          u16 len0;
          icmp6_to_icmp_ctx_t ctx0;
          ip6_header_t *ip60;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;
          next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);
          len0 = clib_net_to_host_u16 (ip60->payload_length);
          d0 = pool_elt_at_index (map_main.domains,
                                  vnet_buffer (p0)->map_t.map_domain_index);
          ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
          ctx0.d = d0;
          if (ctx0.sender_port == 0)
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp6_to_icmp (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
                             ip6_to_ip4_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              // Send to fragmentation node if necessary
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
              next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
            }
          else
            {
              next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                IP6_MAPT_ICMP_NEXT_IP4_REWRITE : next0;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP6_MAPT_ICMP_NEXT_DROP;
            }

          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/*
 * Translate IPv6 fragmented packet to IPv4.
 */
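/*
 * Note on the length arithmetic used below: l4_offset is measured from the
 * start of the IPv6 header, so adding "sizeof (*ip4) - l4_offset +
 * sizeof (*ip6)" to the IPv6 payload length strips the IPv6 extension
 * headers and accounts for the new 20-byte IPv4 header.  For example, with
 * only a fragment header present, l4_offset = 48, so the IPv4 total length
 * becomes payload_length + 20 - 48 + 40 = payload_length + 12.
 */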
always_inline int
map_ip6_to_ip4_fragmented (vlib_buffer_t * p)
{
  ip6_header_t *ip6;
  ip6_frag_hdr_t *frag;
  ip4_header_t *ip4;
  u16 frag_id;
  u8 frag_more;
  u16 frag_offset;
  u8 l4_protocol;
  u16 l4_offset;

  ip6 = vlib_buffer_get_current (p);

  if (ip6_parse
      (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset))
    return -1;

  frag = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset);
  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));
  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));

  frag_id = frag_id_6to4 (frag->identification);
  frag_more = ip6_frag_hdr_more (frag);
  frag_offset = ip6_frag_hdr_offset (frag);

  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;

  ip4->ip_version_and_header_length =
    IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
  ip4->tos = ip6_translate_tos (ip6);
  ip4->length =
    u16_net_add (ip6->payload_length,
                 sizeof (*ip4) - l4_offset + sizeof (*ip6));
  ip4->fragment_id = frag_id;
  ip4->flags_and_fragment_offset =
    clib_host_to_net_u16 (frag_offset |
                          (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
  ip4->ttl = ip6->hop_limit;
  ip4->protocol =
    (l4_protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : l4_protocol;
  ip4->checksum = ip4_header_checksum (ip4);

  return 0;
}

static uword
ip6_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          u32 next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          if (map_ip6_to_ip4_fragmented (p0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  // Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP4_FRAG_NEXT_IP4_LOOKUP;
                  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
                }
              else
                {
                  next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                    IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE : next0;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/*
 * Translate IPv6 UDP/TCP packet to IPv4.
 * Returns 0 on success.
 * Returns a non-zero error code on error.
 */
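/*
 * The L4 checksum is adjusted incrementally rather than recomputed: the old
 * IPv6 source/destination are subtracted from, and the new IPv4
 * source/destination added to, the pseudo-header sum.  For UDP the checksum
 * may instead be cleared when the caller does not request UDP checksums,
 * since it is optional over IPv4.  TCP packets are additionally MSS-clamped
 * when a TCP MSS limit is configured (mm->tcp_mss).
 */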
always_inline int
map_ip6_to_ip4_tcp_udp (vlib_buffer_t * p, bool udp_checksum)
{
  map_main_t *mm = &map_main;
  ip6_header_t *ip6;
  u16 *checksum;
  ip_csum_t csum = 0;
  ip4_header_t *ip4;
  u16 fragment_id;
  u16 flags;
  u16 frag_offset;
  u8 l4_protocol;
  u16 l4_offset;
  ip6_address_t old_src, old_dst;

  ip6 = vlib_buffer_get_current (p);

  if (ip6_parse
      (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset))
    return -1;

  if (l4_protocol == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *) u8_ptr_add (ip6, l4_offset);
      if (mm->tcp_mss > 0)
        {
          csum = tcp->checksum;
          map_mss_clamping (tcp, &csum, mm->tcp_mss);
          tcp->checksum = ip_csum_fold (csum);
        }
      checksum = &tcp->checksum;
    }
  else
    {
      udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, l4_offset);
      checksum = &udp->checksum;
    }

  old_src.as_u64[0] = ip6->src_address.as_u64[0];
  old_src.as_u64[1] = ip6->src_address.as_u64[1];
  old_dst.as_u64[0] = ip6->dst_address.as_u64[0];
  old_dst.as_u64[1] = ip6->dst_address.as_u64[1];

  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));

  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));

  if (PREDICT_FALSE (frag_offset))
    {
      // Only the first fragment
      ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset);
      fragment_id = frag_id_6to4 (hdr->identification);
      flags = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
    }
  else
    {
      fragment_id = 0;
      flags = 0;
    }

  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;

  /*
   * Drop spoofed packets that from a known domain source.
   */
  u32 map_domain_index = -1;
  u8 error = 0;

  ip4_map_get_domain (&ip4->src_address, &map_domain_index, &error);
  if (error)
    return error;

  ip4->ip_version_and_header_length =
    IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
  ip4->tos = ip6_translate_tos (ip6);
  ip4->length =
    u16_net_add (ip6->payload_length,
                 sizeof (*ip4) + sizeof (*ip6) - l4_offset);
  ip4->fragment_id = fragment_id;
  ip4->flags_and_fragment_offset = flags;
  ip4->ttl = ip6->hop_limit;
  ip4->protocol = l4_protocol;
  ip4->checksum = ip4_header_checksum (ip4);

  // UDP checksum is optional over IPv4
  if (!udp_checksum && l4_protocol == IP_PROTOCOL_UDP)
    {
      *checksum = 0;
    }
  else
    {
      csum = ip_csum_sub_even (*checksum, old_src.as_u64[0]);
      csum = ip_csum_sub_even (csum, old_src.as_u64[1]);
      csum = ip_csum_sub_even (csum, old_dst.as_u64[0]);
      csum = ip_csum_sub_even (csum, old_dst.as_u64[1]);
      csum = ip_csum_add_even (csum, ip4->dst_address.as_u32);
      csum = ip_csum_add_even (csum, ip4->src_address.as_u32);
      *checksum = ip_csum_fold (csum);
    }

  return 0;
}

static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip6_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

          p0 = vlib_get_buffer (vm, pi0);

          if (map_ip6_to_ip4_tcp_udp (p0, true))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  // Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP4_FRAG_NEXT_IP4_LOOKUP;
                  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
                }
              else
                {
                  next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                    IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE : next0;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

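/*
 * Feature node on the ip6-unicast arc.  It looks up the MAP domain from the
 * IPv6 destination address, stores the derived IPv4 source and destination
 * plus the parsed L4 information in the buffer metadata, performs the
 * port-based security check on the IPv6 source, and then dispatches to the
 * TCP/UDP, ICMP or fragmented translation node.  Packets that do not match
 * any domain are handed to the next feature on the arc.
 */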
static uword
ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_node.index);
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip6_header_t *ip60;
          u8 error0;
          u32 l4_len0;
          i32 map_port0;
          map_domain_t *d0;
          ip6_frag_hdr_t *frag0;
          ip6_mapt_next_t next0 = 0;
          u32 saddr;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);

          d0 =
            ip6_map_get_domain (&ip60->dst_address,
                                &vnet_buffer (p0)->map_t.map_domain_index,
                                &error0);
          if (!d0)
            {                   /* Guess it wasn't for us */
              vnet_feature_next (&next0, p0);
              goto exit;
            }

          saddr = map_get_ip4 (&ip60->src_address, d0->ip6_src_len);
          vnet_buffer (p0)->map_t.v6.saddr = saddr;
          vnet_buffer (p0)->map_t.v6.daddr =
            ip6_map_t_embedded_address (d0, &ip60->dst_address);
          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

          if (PREDICT_FALSE
              (ip6_parse (ip60, p0->current_length,
                          &(vnet_buffer (p0)->map_t.v6.l4_protocol),
                          &(vnet_buffer (p0)->map_t.v6.l4_offset),
                          &(vnet_buffer (p0)->map_t.v6.frag_offset))))
            {
              error0 =
                error0 == MAP_ERROR_NONE ? MAP_ERROR_MALFORMED : error0;
            }

          map_port0 = -1;
          l4_len0 =
            (u32) clib_net_to_host_u16 (ip60->payload_length) +
            sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
          frag0 =
            (ip6_frag_hdr_t *) u8_ptr_add (ip60,
                                           vnet_buffer (p0)->map_t.v6.
                                           frag_offset);

          if (PREDICT_FALSE
              (vnet_buffer (p0)->map_t.v6.frag_offset
               && ip6_frag_hdr_offset (frag0)))
            {
              map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
              if (map_port0 == -1)
                error0 =
                  error0 ==
                  MAP_ERROR_NONE ? MAP_ERROR_FRAGMENT_MEMORY : error0;
              else
                next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
            }
          else
            if (PREDICT_TRUE
                (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
            {
              error0 =
                l4_len0 <
                sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
              vnet_buffer (p0)->map_t.checksum_offset =
                vnet_buffer (p0)->map_t.v6.l4_offset + 16;
              next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
              map_port0 =
                (i32) *
                ((u16 *)
                 u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
            }
          else
            if (PREDICT_TRUE
                (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
            {
              error0 =
                l4_len0 <
                sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
              vnet_buffer (p0)->map_t.checksum_offset =
                vnet_buffer (p0)->map_t.v6.l4_offset + 6;
              next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
              map_port0 =
                (i32) *
                ((u16 *)
                 u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
            }
          else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
                   IP_PROTOCOL_ICMP6)
            {
              error0 =
                l4_len0 <
                sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
              next0 = IP6_MAPT_NEXT_MAPT_ICMP;
              if (((icmp46_header_t *)
                   u8_ptr_add (ip60,
                               vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
                  ICMP6_echo_reply
                  || ((icmp46_header_t *)
                      u8_ptr_add (ip60,
                                  vnet_buffer (p0)->map_t.v6.l4_offset))->
                  code == ICMP6_echo_request)
                map_port0 =
                  (i32) *
                  ((u16 *)
                   u8_ptr_add (ip60,
                               vnet_buffer (p0)->map_t.v6.l4_offset + 6));
            }
          else
            {
              // TODO: In case of 1:1 mapping, it might be possible to
              // do something with those packets.
              error0 = MAP_ERROR_BAD_PROTOCOL;
            }

          if (PREDICT_FALSE (map_port0 != -1) &&
              (ip60->src_address.as_u64[0] !=
               map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
                                map_port0)
               || ip60->src_address.as_u64[1] !=
               map_get_sfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
                                map_port0)))
            {
              // Security check when map_port0 is not zero (non-first
              // fragment, UDP or TCP)
              error0 =
                error0 == MAP_ERROR_NONE ? MAP_ERROR_SEC_CHECK : error0;
            }

          // Fragmented first packet needs to be cached for following packets
          if (PREDICT_FALSE
              (vnet_buffer (p0)->map_t.v6.frag_offset
               && !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
                                        u8_ptr_add (ip60,
                                                    vnet_buffer (p0)->map_t.
                                                    v6.frag_offset)))
              && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
              && (error0 == MAP_ERROR_NONE))
            {
              ip6_map_fragment_cache (ip60,
                                      (ip6_frag_hdr_t *)
                                      u8_ptr_add (ip60,
                                                  vnet_buffer (p0)->map_t.v6.
                                                  frag_offset),
                                      d0, map_port0);
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->map_t.
                                               map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip60->payload_length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
          p0->error = error_node->errors[error0];
        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

static char *map_t_error_strings[] = {
#define _(sym, string) string,
  foreach_map_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_ICMP_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

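/*
 * The feature registration below attaches "ip6-map-t" to the ip6-unicast
 * arc so that it runs before ip6-flow-classify.  Note that the registration
 * symbol is named ip4_map_t_feature in this source even though it installs
 * the IPv6 node.
 */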
/* *INDENT-OFF* */
VNET_FEATURE_INIT(ip4_map_t_feature, static) = {
  .arc_name = "ip6-unicast",
  .node_name = "ip6-map-t",
  .runs_before = VNET_FEATURES("ip6-flow-classify"),
};

VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
    [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
    [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
    [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
Definition: ip6_packet.h:383