FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
nat64_in2out.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
18  */
19 
20 #include <nat/nat64.h>
21 #include <nat/nat_inlines.h>
22 #include <vnet/ip/ip6_to_ip4.h>
23 #include <vnet/fib/fib_table.h>
24 
25 typedef struct
26 {
31 
32 static u8 *
33 format_nat64_in2out_trace (u8 * s, va_list * args)
34 {
35  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37  nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
38  char *tag;
39 
40  tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
41 
42  s =
43  format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
44  t->next_index);
45 
46  return s;
47 }
48 
/* Error/counter definitions for the NAT64 in2out nodes.  Each _() entry
   expands into a NAT64_IN2OUT_ERROR_<sym> enum value and the matching
   human-readable counter string (see the enum/strings below, which were
   partially lost in extraction). */
49 #define foreach_nat64_in2out_error \
50 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
51 _(IN2OUT_PACKETS, "good in2out packets processed") \
52 _(NO_TRANSLATION, "no translation") \
53 _(UNKNOWN, "unknown") \
54 _(DROP_FRAGMENT, "drop fragment") \
55 _(TCP_PACKETS, "TCP packets") \
56 _(UDP_PACKETS, "UDP packets") \
57 _(ICMP_PACKETS, "ICMP packets") \
58 _(OTHER_PACKETS, "other protocol packets") \
59 _(FRAGMENTS, "fragments") \
60 _(CACHED_FRAGMENTS, "cached fragments") \
61 _(PROCESSED_FRAGMENTS, "processed fragments")
62 
63 
64 typedef enum
65 {
66 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
68 #undef _
71 
72 static char *nat64_in2out_error_strings[] = {
73 #define _(sym,string) string,
75 #undef _
76 };
77 
78 typedef enum
79 {
86 
88 {
93 
94 static inline u8
95 nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
96 {
97  ip6_address_t *addr;
98  ip6_main_t *im6 = &ip6_main;
99  ip_lookup_main_t *lm6 = &im6->lookup_main;
100  ip_interface_address_t *ia = 0;
101 
102  /* *INDENT-OFF* */
103  foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
104  ({
105  addr = ip_interface_address_get_address (lm6, ia);
106  if (0 == ip6_address_compare (addr, &ip6_addr))
107  return 1;
108  }));
109  /* *INDENT-ON* */
110 
111  return 0;
112 }
113 
114 /**
115  * @brief Check whether is a hairpinning.
116  *
117  * If the destination IP address of the packet is an IPv4 address assigned to
118  * the NAT64 itself, then the packet is a hairpin packet.
119  *
120  * param dst_addr Destination address of the packet.
121  *
122  * @returns 1 if hairpinning, otherwise 0.
123  */
/* NOTE(review): the storage-class/return-type line of this definition
   (doxygen line 124) was lost in extraction; upstream it is presumably
   static_always_inline int — confirm against the original file. */
125 is_hairpinning (ip6_address_t * dst_addr)
126 {
127  nat64_main_t *nm = &nat64_main;
128  int i;
129 
     /* The packet hairpins when the IPv4 address embedded in the low
        32 bits of the IPv6 destination matches any NAT64 pool address. */
130  for (i = 0; i < vec_len (nm->addr_pool); i++)
131  {
132  if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
133  return 1;
134  }
135 
136  return 0;
137 }
138 
/**
 * @brief In-place IPv6 -> IPv4 translation of a TCP/UDP packet (in2out).
 *
 * Rewrites the IPv6 header into an IPv4 header in front of the L4 header,
 * finds (or creates) the BIB and session-table entries that provide the
 * outside address/port, and incrementally patches the L4 checksum.
 *
 * NOTE(review): several lines were lost in extraction — the first half of
 * the parameter list (doxygen 140), the flow-label RHS (166), parts of the
 * IPv4 header writes (191-192, 197), the fib-index lookup (205), and the
 * port-allocation/BIB-create/counter calls (226, 232, 238, 249).  The
 * surviving code is kept byte-identical; confirm against upstream.
 *
 * @returns 0 on success, -1 if no translation can be made.
 */
139 static int
141  u16 frag_hdr_offset, nat64_in2out_set_ctx_t * ctx)
142 {
143  ip6_header_t *ip6;
144  ip_csum_t csum = 0;
145  ip4_header_t *ip4;
146  u16 fragment_id;
147  u8 frag_more;
148  u16 frag_offset;
149  nat64_main_t *nm = &nat64_main;
150  nat64_db_bib_entry_t *bibe;
151  nat64_db_st_entry_t *ste;
152  ip46_address_t old_saddr, old_daddr;
153  ip4_address_t new_daddr;
154  u32 sw_if_index, fib_index;
155  u8 proto = vnet_buffer (p)->ip.reass.ip_proto;
156  u16 sport = vnet_buffer (p)->ip.reass.l4_src_port;
157  u16 dport = vnet_buffer (p)->ip.reass.l4_dst_port;
158  nat64_db_t *db = &nm->db[ctx->thread_index];
159 
160  ip6 = vlib_buffer_get_current (p);
161 
     /* Place the IPv4 header immediately before the L4 header. */
162  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));
163  ip4 = vlib_buffer_get_current (p);
164 
     /* NOTE(review): the RHS of this initializer (doxygen 166) was lost. */
165  u32 ip_version_traffic_class_and_flow_label =
167  u16 payload_length = ip6->payload_length;
168  u8 hop_limit = ip6->hop_limit;
169 
     /* Save the original addresses for the incremental checksum update. */
170  old_saddr.as_u64[0] = ip6->src_address.as_u64[0];
171  old_saddr.as_u64[1] = ip6->src_address.as_u64[1];
172  old_daddr.as_u64[0] = ip6->dst_address.as_u64[0];
173  old_daddr.as_u64[1] = ip6->dst_address.as_u64[1];
174 
175  if (PREDICT_FALSE (frag_hdr_offset))
176  {
177  //Only the first fragment
178  ip6_frag_hdr_t *hdr =
179  (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
180  fragment_id = frag_id_6to4 (hdr->identification);
181  frag_more = ip6_frag_hdr_more (hdr);
182  frag_offset = ip6_frag_hdr_offset (hdr);
183  }
184  else
185  {
186  fragment_id = 0;
187  frag_offset = 0;
188  frag_more = 0;
189  }
190 
     /* Build the IPv4 header (version/IHL write at doxygen 191-192 and the
        flags_and_fragment_offset LHS at 197 were lost in extraction). */
193  ip4->tos = ip6_translate_tos (ip_version_traffic_class_and_flow_label);
194  ip4->length =
195  u16_net_add (payload_length, sizeof (*ip4) + sizeof (*ip6) - l4_offset);
196  ip4->fragment_id = fragment_id;
198  clib_host_to_net_u16 (frag_offset |
199  (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
200  ip4->ttl = hop_limit;
201  ip4->protocol = (proto == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : proto;
202 
203  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
204  fib_index =
206 
     /* Fast path: reuse an existing session for this 5-tuple. */
207  ste =
208  nat64_db_st_entry_find (db, &old_saddr, &old_daddr, sport, dport, proto,
209  fib_index, 1);
210 
211  if (ste)
212  {
213  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
214  if (!bibe)
215  return -1;
216  }
217  else
218  {
219  bibe =
220  nat64_db_bib_entry_find (db, &old_saddr, sport, proto, fib_index, 1);
221 
222  if (!bibe)
223  {
224  u16 out_port;
225  ip4_address_t out_addr;
227  (fib_index, ip_proto_to_nat_proto (proto), &out_addr,
228  &out_port, ctx->thread_index))
229  return -1;
230 
231  bibe =
233  &old_saddr.ip6, &out_addr, sport,
234  out_port, fib_index, proto, 0);
235  if (!bibe)
236  return -1;
237 
239  db->bib.bib_entries_num);
240  }
241 
     /* Extract the IPv4 destination embedded in the NAT64 prefix. */
242  nat64_extract_ip4 (&old_daddr.ip6, &new_daddr, fib_index);
243  ste =
244  nat64_db_st_entry_create (ctx->thread_index, db, bibe,
245  &old_daddr.ip6, &new_daddr, dport);
246  if (!ste)
247  return -1;
248 
250  db->st.st_entries_num);
251  }
252 
253  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
254  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
255 
256  ip4->checksum = ip4_header_checksum (ip4);
257 
     /* First fragment only: rewrite the L4 source port and checksum. */
258  if (!vnet_buffer (p)->ip.reass.is_non_first_fragment)
259  {
260  udp_header_t *udp = (udp_header_t *) (ip4 + 1);
261  udp->src_port = bibe->out_port;
262 
263  //UDP checksum is optional over IPv4
264  if (proto == IP_PROTOCOL_UDP)
265  {
266  udp->checksum = 0;
267  }
268  else
269  {
     /* TCP: incrementally swap old IPv6 addrs/port for new IPv4 ones. */
270  tcp_header_t *tcp = (tcp_header_t *) (ip4 + 1);
271  csum = ip_csum_sub_even (tcp->checksum, old_saddr.as_u64[0]);
272  csum = ip_csum_sub_even (csum, old_saddr.as_u64[1]);
273  csum = ip_csum_sub_even (csum, old_daddr.as_u64[0]);
274  csum = ip_csum_sub_even (csum, old_daddr.as_u64[1]);
275  csum = ip_csum_add_even (csum, ip4->dst_address.as_u32);
276  csum = ip_csum_add_even (csum, ip4->src_address.as_u32);
277  csum = ip_csum_sub_even (csum, sport);
278  csum = ip_csum_add_even (csum, udp->src_port);
279  mss_clamping (nm->sm, tcp, &csum);
280  tcp->checksum = ip_csum_fold (csum);
281 
282  nat64_tcp_session_set_state (ste, tcp, 1);
283  }
284  }
285 
286  nat64_session_reset_timeout (ste, ctx->vm);
287 
288  return 0;
289 }
290 
/**
 * @brief icmp6_to_icmp translation callback: set the outer IPv4 addresses
 * for an ICMP packet in the in2out direction.
 *
 * For echo request/reply the ICMP id (16-bit word at offset 4 of the ICMP
 * header) plays the role of the port in BIB/session lookup and is rewritten
 * to the outside id.  Other ICMP types fall back to the first pool address.
 *
 * NOTE(review): the signature (doxygen 292), the ip4 local context (295),
 * the fib-index lookup (305) and the id-allocation/BIB-create/counter
 * calls (337, 343, 350, 361) were lost in extraction.
 *
 * @returns 0 on success, -1 on error.
 */
291 static int
293 {
294  nat64_main_t *nm = &nat64_main;
296  nat64_db_bib_entry_t *bibe;
297  nat64_db_st_entry_t *ste;
298  ip46_address_t saddr, daddr;
299  u32 sw_if_index, fib_index;
300  icmp46_header_t *icmp = ip6_next_header (ip6);
301  nat64_db_t *db = &nm->db[ctx->thread_index];
302 
303  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
304  fib_index =
306 
307  saddr.as_u64[0] = ip6->src_address.as_u64[0];
308  saddr.as_u64[1] = ip6->src_address.as_u64[1];
309  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
310  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
311 
312  if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
313  {
     /* ICMP id is the 3rd 16-bit word of the ICMP header. */
314  u16 in_id = ((u16 *) (icmp))[2];
315  ste =
316  nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
317  IP_PROTOCOL_ICMP, fib_index, 1);
318 
319  if (ste)
320  {
321  bibe =
322  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
323  ste->bibe_index);
324  if (!bibe)
325  return -1;
326  }
327  else
328  {
329  bibe =
330  nat64_db_bib_entry_find (db, &saddr, in_id,
331  IP_PROTOCOL_ICMP, fib_index, 1);
332 
333  if (!bibe)
334  {
335  u16 out_id;
336  ip4_address_t out_addr;
338  (fib_index, NAT_PROTOCOL_ICMP, &out_addr, &out_id,
339  ctx->thread_index))
340  return -1;
341 
342  bibe =
344  &ip6->src_address, &out_addr,
345  in_id, out_id, fib_index,
346  IP_PROTOCOL_ICMP, 0);
347  if (!bibe)
348  return -1;
349 
351  db->bib.bib_entries_num);
352  }
353 
354  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
355  ste =
356  nat64_db_st_entry_create (ctx->thread_index, db, bibe,
357  &ip6->dst_address, &daddr.ip4, 0);
358  if (!ste)
359  return -1;
360 
362  db->st.st_entries_num);
363  }
364 
365  nat64_session_reset_timeout (ste, ctx->vm);
366 
367  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
368  ((u16 *) (icmp))[2] = bibe->out_port;
369 
370  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
371  }
372  else
373  {
     /* Non-echo ICMP: no per-session state; use the first pool address. */
374  if (!vec_len (nm->addr_pool))
375  return -1;
376 
377  ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
378  nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
379  }
380 
381  return 0;
382 }
383 
/**
 * @brief icmp6_to_icmp callback for the inner (embedded) packet of an
 * ICMPv6 error message: translate the embedded header through existing
 * sessions, with src/dst swapped relative to the outer packet.
 *
 * NOTE(review): the signature (doxygen 385) and the ctx declaration (389)
 * were lost in extraction; `ctx`/`ip4` are presumably the usual
 * nat64_in2out_set_ctx_t and the inner IPv4 header — confirm upstream.
 *
 * @returns 0 on success, -1 if no matching session exists.
 */
384 static int
386  void *arg)
387 {
388  nat64_main_t *nm = &nat64_main;
390  nat64_db_st_entry_t *ste;
391  nat64_db_bib_entry_t *bibe;
392  ip46_address_t saddr, daddr;
393  u32 sw_if_index, fib_index;
394  u8 proto = ip6->protocol;
395  nat64_db_t *db = &nm->db[ctx->thread_index];
396 
397  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
398  fib_index =
400 
401  saddr.as_u64[0] = ip6->src_address.as_u64[0];
402  saddr.as_u64[1] = ip6->src_address.as_u64[1];
403  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
404  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
405 
406  if (proto == IP_PROTOCOL_ICMP6)
407  {
408  icmp46_header_t *icmp = ip6_next_header (ip6);
409  u16 in_id = ((u16 *) (icmp))[2];
410  proto = IP_PROTOCOL_ICMP;
411 
     /* Only embedded echo request/reply are translatable here. */
412  if (!
413  (icmp->type == ICMP4_echo_request
414  || icmp->type == ICMP4_echo_reply))
415  return -1;
416 
     /* Lookup with daddr/saddr swapped: the inner packet flows the
        opposite direction from the outer error message. */
417  ste =
418  nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
419  fib_index, 1);
420  if (!ste)
421  return -1;
422 
423  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
424  if (!bibe)
425  return -1;
426 
427  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
428  ((u16 *) (icmp))[2] = bibe->out_port;
429  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
430  }
431  else
432  {
433  udp_header_t *udp = ip6_next_header (ip6);
434  tcp_header_t *tcp = ip6_next_header (ip6);
435  u16 *checksum;
436  ip_csum_t csum;
437 
438  u16 sport = udp->src_port;
439  u16 dport = udp->dst_port;
440 
441  ste =
442  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
443  fib_index, 1);
444  if (!ste)
445  return -1;
446 
447  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
448  if (!bibe)
449  return -1;
450 
451  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
452  udp->dst_port = bibe->out_port;
453  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
454 
     /* Incrementally patch the L4 checksum for the rewritten dst port. */
455  if (proto == IP_PROTOCOL_TCP)
456  checksum = &tcp->checksum;
457  else
458  checksum = &udp->checksum;
459  csum = ip_csum_sub_even (*checksum, dport);
460  csum = ip_csum_add_even (csum, udp->dst_port);
461  *checksum = ip_csum_fold (csum);
462  }
463 
464  return 0;
465 }
466 
468 {
469  ip6_address_t src_addr;
470  ip6_address_t dst_addr;
476 
/**
 * @brief Session-table walk callback for unknown-protocol packets: look
 * for an existing TCP/UDP session from the same inside source to the same
 * destination so its outside address can be reused.
 *
 * NOTE(review): the ctx declaration (doxygen 481) and the session-exists
 * check at 500-501 were lost in extraction.
 *
 * @returns 1 (stop walk) when a reusable outside address was stored in
 * ctx->out_addr, -1 when the address is already taken, 0 to keep walking.
 */
477 static int
478 unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
479 {
480  nat64_main_t *nm = &nat64_main;
482  nat64_db_bib_entry_t *bibe;
483  ip46_address_t saddr, daddr;
484  nat64_db_t *db = &nm->db[ctx->thread_index];
485 
486  if (ip6_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
487  {
488  bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
489  if (!bibe)
490  return -1;
491 
492  if (ip6_address_is_equal (&bibe->in_addr, &ctx->src_addr)
493  && bibe->fib_index == ctx->fib_index)
494  {
495  clib_memset (&saddr, 0, sizeof (saddr));
496  saddr.ip4.as_u32 = bibe->out_addr.as_u32;
497  clib_memset (&daddr, 0, sizeof (daddr));
498  nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
499 
501  (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
502  return -1;
503 
504  ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
505  return 1;
506  }
507  }
508 
509  return 0;
510 }
511 
/**
 * @brief Translate an IPv6 packet of an unknown (non-TCP/UDP/ICMP)
 * protocol to IPv4 (in2out).  Sessions are keyed by addresses only
 * (ports 0); the outside address is reused from an existing TCP/UDP
 * session to the same destination when possible, otherwise taken from
 * the address pool.
 *
 * NOTE(review): lines lost in extraction include the signature (doxygen
 * 513), the fib-index lookup (555), the ctx initializer opening (580),
 * BIB-create/counter calls (607, 617, 623, 634) and part of the IPv4
 * header writes (643-645, 649).
 *
 * @returns 0 on success, -1 if no translation can be made.
 */
512 static int
514  u16 l4_offset, u16 frag_hdr_offset,
515  nat64_in2out_set_ctx_t * s_ctx)
516 {
517  ip6_header_t *ip6;
518  ip4_header_t *ip4;
519  u16 fragment_id;
520  u16 frag_offset;
521  u8 frag_more;
522 
523  ip6 = vlib_buffer_get_current (p);
524 
525  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));
526 
527  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));
528 
529  if (PREDICT_FALSE (frag_hdr_offset))
530  {
531  //Only the first fragment
532  ip6_frag_hdr_t *hdr =
533  (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
534  fragment_id = frag_id_6to4 (hdr->identification);
535  frag_offset = ip6_frag_hdr_offset (hdr);
536  frag_more = ip6_frag_hdr_more (hdr);
537  }
538  else
539  {
540  fragment_id = 0;
541  frag_offset = 0;
542  frag_more = 0;
543  }
544 
545  nat64_main_t *nm = &nat64_main;
546  nat64_db_bib_entry_t *bibe;
547  nat64_db_st_entry_t *ste;
548  ip46_address_t saddr, daddr, addr;
549  u32 sw_if_index, fib_index;
550  int i;
551  nat64_db_t *db = &nm->db[s_ctx->thread_index];
552 
553  sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
554  fib_index =
556 
557  saddr.as_u64[0] = ip6->src_address.as_u64[0];
558  saddr.as_u64[1] = ip6->src_address.as_u64[1];
559  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
560  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
561 
     /* Sessions for unknown protocols carry no ports (0, 0). */
562  ste =
563  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, l4_protocol, fib_index,
564  1);
565 
566  if (ste)
567  {
568  bibe = nat64_db_bib_entry_by_index (db, l4_protocol, ste->bibe_index);
569  if (!bibe)
570  return -1;
571  }
572  else
573  {
574  bibe =
575  nat64_db_bib_entry_find (db, &saddr, 0, l4_protocol, fib_index, 1);
576 
577  if (!bibe)
578  {
579  /* Choose same out address as for TCP/UDP session to same dst */
581  .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
582  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
583  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
584  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
585  .out_addr.as_u32 = 0,
586  .fib_index = fib_index,
587  .proto = l4_protocol,
588  .thread_index = s_ctx->thread_index,
589  };
590 
591  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
592 
593  if (!ctx.out_addr.as_u32)
594  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
595 
596  /* Verify if out address is not already in use for protocol */
597  clib_memset (&addr, 0, sizeof (addr));
598  addr.ip4.as_u32 = ctx.out_addr.as_u32;
599  if (nat64_db_bib_entry_find (db, &addr, 0, l4_protocol, 0, 0))
600  ctx.out_addr.as_u32 = 0;
601 
     /* Fall back to the first pool address free for this protocol. */
602  if (!ctx.out_addr.as_u32)
603  {
604  for (i = 0; i < vec_len (nm->addr_pool); i++)
605  {
606  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
608  (db, &addr, 0, l4_protocol, 0, 0))
609  break;
610  }
611  }
612 
613  if (!ctx.out_addr.as_u32)
614  return -1;
615 
616  bibe =
618  &ip6->src_address, &ctx.out_addr,
619  0, 0, fib_index, l4_protocol, 0);
620  if (!bibe)
621  return -1;
622 
624  db->bib.bib_entries_num);
625  }
626 
627  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
628  ste =
629  nat64_db_st_entry_create (s_ctx->thread_index, db, bibe,
630  &ip6->dst_address, &daddr.ip4, 0);
631  if (!ste)
632  return -1;
633 
635  db->st.st_entries_num);
636  }
637 
638  nat64_session_reset_timeout (ste, s_ctx->vm);
639 
640  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
641  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
642 
     /* Build the IPv4 header (version/IHL/TOS writes at doxygen 643-645
        and the flags_and_fragment_offset LHS at 649 were lost). */
646  ip4->length = u16_net_add (ip6->payload_length,
647  sizeof (*ip4) + sizeof (*ip6) - l4_offset);
648  ip4->fragment_id = fragment_id;
650  clib_host_to_net_u16 (frag_offset |
651  (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
652  ip4->ttl = ip6->hop_limit;
653  ip4->protocol = l4_protocol;
654  ip4->checksum = ip4_header_checksum (ip4);
655 
656  return 0;
657 }
658 
/**
 * @brief Handle a hairpinning TCP/UDP packet: both endpoints are inside,
 * so the packet stays IPv6 — the source is rewritten through this host's
 * BIB mapping and the destination is mapped back through the peer's BIB
 * entry (searched across all per-thread DBs).
 *
 * NOTE(review): the signature tail (doxygen 660, 662), the fib-index
 * lookup (680-681) and the port-allocation call (717-718) were lost in
 * extraction.
 *
 * @returns 0 on success, -1 if no mapping exists.
 */
659 static int
661  ip6_header_t * ip6, u32 l4_offset,
663 {
664  nat64_main_t *nm = &nat64_main;
665  nat64_db_bib_entry_t *bibe;
666  nat64_db_st_entry_t *ste;
667  ip46_address_t saddr, daddr;
668  u32 sw_if_index, fib_index;
669  udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, l4_offset);
670  tcp_header_t *tcp = (tcp_header_t *) u8_ptr_add (ip6, l4_offset);
671  u8 proto = vnet_buffer (b)->ip.reass.ip_proto;
672  u16 sport = vnet_buffer (b)->ip.reass.l4_src_port;
673  u16 dport = vnet_buffer (b)->ip.reass.l4_dst_port;
674  u16 *checksum = NULL;
675  ip_csum_t csum = 0;
676  nat64_db_t *db = &nm->db[thread_index];
677 
678  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
679  fib_index =
681 
682  saddr.as_u64[0] = ip6->src_address.as_u64[0];
683  saddr.as_u64[1] = ip6->src_address.as_u64[1];
684  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
685  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
686 
     /* First fragment: back the old addresses out of the L4 checksum
        before the headers are rewritten. */
687  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
688  {
689  if (proto == IP_PROTOCOL_UDP)
690  checksum = &udp->checksum;
691  else
692  checksum = &tcp->checksum;
693  csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
694  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
695  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
696  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
697  }
698 
699  ste =
700  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
701  fib_index, 1);
702 
703  if (ste)
704  {
705  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
706  if (!bibe)
707  return -1;
708  }
709  else
710  {
711  bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
712 
713  if (!bibe)
714  {
715  u16 out_port;
716  ip4_address_t out_addr;
718  (fib_index, ip_proto_to_nat_proto (proto), &out_addr,
719  &out_port, thread_index))
720  return -1;
721 
722  bibe =
723  nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
724  &out_addr, sport, out_port, fib_index,
725  proto, 0);
726  if (!bibe)
727  return -1;
728 
729  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
730  db->bib.bib_entries_num);
731  }
732 
733  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
734  ste =
735  nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
736  &daddr.ip4, dport);
737  if (!ste)
738  return -1;
739 
740  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
741  db->st.st_entries_num);
742  }
743 
744  if (proto == IP_PROTOCOL_TCP)
745  nat64_tcp_session_set_state (ste, tcp, 1);
746 
747  nat64_session_reset_timeout (ste, vm);
748 
749  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
750  {
751  udp->src_port = bibe->out_port;
752  }
753 
754  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
755 
756  clib_memset (&daddr, 0, sizeof (daddr));
757  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
758 
     /* Find the hairpin destination's BIB entry in any thread's DB. */
759  bibe = 0;
760  /* *INDENT-OFF* */
761  vec_foreach (db, nm->db)
762  {
763  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);
764 
765  if (bibe)
766  break;
767  }
768  /* *INDENT-ON* */
769 
770  if (!bibe)
771  return -1;
772 
773  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
774  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
775 
     /* First fragment: fold the new addresses/ports back into the checksum. */
776  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
777  {
778  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
779  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
780  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
781  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
782  csum = ip_csum_sub_even (csum, sport);
783  csum = ip_csum_sub_even (csum, dport);
784  udp->dst_port = bibe->in_port;
785  csum = ip_csum_add_even (csum, udp->src_port);
786  csum = ip_csum_add_even (csum, udp->dst_port);
787  *checksum = ip_csum_fold (csum);
788  }
789 
790  return 0;
791 }
792 
/**
 * @brief Handle a hairpinning ICMPv6 error message: translate the
 * embedded TCP/UDP packet through the existing sessions of both
 * endpoints (searched across all per-thread DBs) and rebuild the outer
 * IPv6 header and ICMPv6 checksum.
 *
 * NOTE(review): the signature (doxygen 794-795) and the fib-index
 * lookup (822-824) were lost in extraction.
 *
 * @returns 0 on success, -1 for echo messages or missing state.
 */
793 static int
796 {
797  nat64_main_t *nm = &nat64_main;
798  nat64_db_bib_entry_t *bibe;
799  nat64_db_st_entry_t *ste;
800  icmp46_header_t *icmp = ip6_next_header (ip6);
801  ip6_header_t *inner_ip6;
802  ip46_address_t saddr, daddr;
803  u32 sw_if_index, fib_index;
804  u8 proto;
805  udp_header_t *udp;
806  tcp_header_t *tcp;
807  u16 *checksum, sport, dport;
808  ip_csum_t csum;
809  nat64_db_t *db = &nm->db[thread_index];
810 
     /* Echo request/reply are not errors and carry no embedded packet. */
811  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
812  return -1;
813 
     /* The embedded (inner) IPv6 header starts after the 8-byte ICMP header. */
814  inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
815 
816  proto = inner_ip6->protocol;
817 
818  if (proto == IP_PROTOCOL_ICMP6)
819  return -1;
820 
821  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
822  fib_index =
824 
825  saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
826  saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
827  daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
828  daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
829 
830  udp = ip6_next_header (inner_ip6);
831  tcp = ip6_next_header (inner_ip6);
832 
833  sport = udp->src_port;
834  dport = udp->dst_port;
835 
836  if (proto == IP_PROTOCOL_UDP)
837  checksum = &udp->checksum;
838  else
839  checksum = &tcp->checksum;
840 
841  csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
842  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
843  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
844  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
845  csum = ip_csum_sub_even (csum, sport);
846  csum = ip_csum_sub_even (csum, dport);
847 
     /* Inner packet flows opposite to the outer error: swap src/dst. */
848  ste =
849  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
850  fib_index, 1);
851  if (!ste)
852  return -1;
853 
854  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
855  if (!bibe)
856  return -1;
857 
858  dport = udp->dst_port = bibe->out_port;
859  nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
860 
861  clib_memset (&saddr, 0, sizeof (saddr));
862  clib_memset (&daddr, 0, sizeof (daddr));
863  saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
864  daddr.ip4.as_u32 = bibe->out_addr.as_u32;
865 
     /* Find the peer's session in any thread's DB. */
866  ste = 0;
867  /* *INDENT-OFF* */
868  vec_foreach (db, nm->db)
869  {
870  ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
871  0, 0);
872 
873  if (ste)
874  break;
875  }
876  /* *INDENT-ON* */
877 
878  if (!ste)
879  return -1;
880 
881  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
882  if (!bibe)
883  return -1;
884 
885  inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
886  inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
887  udp->src_port = bibe->in_port;
888 
889  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
890  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
891  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
892  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
893  csum = ip_csum_add_even (csum, udp->src_port);
894  csum = ip_csum_add_even (csum, udp->dst_port);
895  *checksum = ip_csum_fold (csum);
896 
897  if (!vec_len (nm->addr_pool))
898  return -1;
899 
900  nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
901  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
902  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
903 
     /* Recompute the ICMPv6 checksum over pseudo-header + payload. */
904  icmp->checksum = 0;
905  csum = ip_csum_with_carry (0, ip6->payload_length);
906  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
907  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
908  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
909  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
910  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
911  csum =
912  ip_incremental_checksum (csum, icmp,
913  clib_net_to_host_u16 (ip6->payload_length));
914  icmp->checksum = ~ip_csum_fold (csum);
915 
916  return 0;
917 }
918 
/**
 * @brief Handle a hairpinning packet of an unknown protocol: like
 * nat64_in2out_unk_proto, but the packet stays IPv6 and the destination
 * is resolved back through the peer's BIB entry (searched across all
 * per-thread DBs).
 *
 * NOTE(review): the signature (doxygen 920-921), the fib-index lookup
 * (933-935) and the ctx initializer opening (957) were lost in
 * extraction.
 *
 * @returns 0 on success, -1 if no mapping can be made.
 */
919 static int
922 {
923  nat64_main_t *nm = &nat64_main;
924  nat64_db_bib_entry_t *bibe;
925  nat64_db_st_entry_t *ste;
926  ip46_address_t saddr, daddr, addr;
927  u32 sw_if_index, fib_index;
928  u8 proto = ip6->protocol;
929  int i;
930  nat64_db_t *db = &nm->db[thread_index];
931 
932  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
933  fib_index =
935 
936  saddr.as_u64[0] = ip6->src_address.as_u64[0];
937  saddr.as_u64[1] = ip6->src_address.as_u64[1];
938  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
939  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
940 
     /* Unknown-protocol sessions carry no ports (0, 0). */
941  ste =
942  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
943 
944  if (ste)
945  {
946  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
947  if (!bibe)
948  return -1;
949  }
950  else
951  {
952  bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
953 
954  if (!bibe)
955  {
956  /* Choose same out address as for TCP/UDP session to same dst */
958  .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
959  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
960  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
961  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
962  .out_addr.as_u32 = 0,
963  .fib_index = fib_index,
964  .proto = proto,
965  .thread_index = thread_index,
966  };
967 
968  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
969 
970  if (!ctx.out_addr.as_u32)
971  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
972 
973  /* Verify if out address is not already in use for protocol */
974  clib_memset (&addr, 0, sizeof (addr));
975  addr.ip4.as_u32 = ctx.out_addr.as_u32;
976  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
977  ctx.out_addr.as_u32 = 0;
978 
     /* Fall back to the first pool address free for this protocol. */
979  if (!ctx.out_addr.as_u32)
980  {
981  for (i = 0; i < vec_len (nm->addr_pool); i++)
982  {
983  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
984  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
985  break;
986  }
987  }
988 
989  if (!ctx.out_addr.as_u32)
990  return -1;
991 
992  bibe =
993  nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
994  &ctx.out_addr, 0, 0, fib_index, proto,
995  0);
996  if (!bibe)
997  return -1;
998 
999  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
1000  db->bib.bib_entries_num);
1001  }
1002 
1003  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
1004  ste =
1005  nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
1006  &daddr.ip4, 0);
1007  if (!ste)
1008  return -1;
1009 
1010  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
1011  db->st.st_entries_num);
1012  }
1013 
1014  nat64_session_reset_timeout (ste, vm);
1015 
1016  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
1017 
1018  clib_memset (&daddr, 0, sizeof (daddr));
1019  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1020 
     /* Find the hairpin destination's BIB entry in any thread's DB. */
1021  bibe = 0;
1022  /* *INDENT-OFF* */
1023  vec_foreach (db, nm->db)
1024  {
1025  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);
1026 
1027  if (bibe)
1028  break;
1029  }
1030  /* *INDENT-ON* */
1031 
1032  if (!bibe)
1033  return -1;
1034 
1035  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1036  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1037 
1038  return 0;
1039 }
1040 
/**
 * @brief Shared packet-processing loop for the nat64-in2out and
 * nat64-in2out-slowpath graph nodes.
 *
 * TCP/UDP and ICMP packets are translated inline; hairpinning packets
 * are rewritten in place.  In the slow path, unknown protocols are
 * translated via nat64_in2out_unk_proto; in the fast path they are
 * presumably redirected to the slow-path node (the line doing so,
 * doxygen 1151, was lost in extraction — confirm upstream).
 *
 * NOTE(review): many lines were lost in extraction: the signature
 * (doxygen 1042), thread_index and ctx0 declarations (1049, 1077), the
 * default next0 assignment (1095), the IP6_LOOKUP next assignment
 * (1111), several call sites (1124-1125, 1135, 1162, 1174, 1190-1191,
 * 1201) and the trace predicate (1211, 1214).
 *
 * @returns number of vectors processed (frame->n_vectors).
 */
1041 static inline uword
1043  vlib_frame_t * frame, u8 is_slow_path)
1044 {
1045  u32 n_left_from, *from, *to_next;
1046  nat64_in2out_next_t next_index;
1047  u32 pkts_processed = 0;
1048  u32 stats_node_index;
1050  nat64_main_t *nm = &nat64_main;
1051 
1052  u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
1053  0, fragments = 0;
1054 
1055  stats_node_index =
1056  is_slow_path ? nm->in2out_slowpath_node_index : nm->in2out_node_index;
1057 
1058  from = vlib_frame_vector_args (frame);
1059  n_left_from = frame->n_vectors;
1060  next_index = node->cached_next_index;
1061 
1062  while (n_left_from > 0)
1063  {
1064  u32 n_left_to_next;
1065 
1066  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1067 
1068  while (n_left_from > 0 && n_left_to_next > 0)
1069  {
1070  u32 bi0;
1071  vlib_buffer_t *b0;
1072  u32 next0;
1073  ip6_header_t *ip60;
1074  u16 l4_offset0, frag_hdr_offset0;
1075  u8 l4_protocol0;
1076  u32 proto0;
1078  u32 sw_if_index0;
1079 
1080  /* speculatively enqueue b0 to the current next frame */
1081  bi0 = from[0];
1082  to_next[0] = bi0;
1083  from += 1;
1084  to_next += 1;
1085  n_left_from -= 1;
1086  n_left_to_next -= 1;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  ip60 = vlib_buffer_get_current (b0);
1090 
1091  ctx0.b = b0;
1092  ctx0.vm = vm;
1093  ctx0.thread_index = thread_index;
1094 
1096 
1097  if (PREDICT_FALSE
1098  (ip6_parse
1099  (vm, b0, ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1100  &frag_hdr_offset0)))
1101  {
1102  next0 = NAT64_IN2OUT_NEXT_DROP;
1103  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1104  goto trace0;
1105  }
1106 
1107  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1108 
     /* Packets addressed to the interface itself are not translated. */
1109  if (nat64_not_translate (sw_if_index0, ip60->dst_address))
1110  {
1112  goto trace0;
1113  }
1114 
1115  proto0 = ip_proto_to_nat_proto (l4_protocol0);
1116 
1117  if (is_slow_path)
1118  {
1119  if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_OTHER))
1120  {
1121  other_packets++;
1122  if (is_hairpinning (&ip60->dst_address))
1123  {
1126  (vm, b0, ip60, thread_index))
1127  {
1128  next0 = NAT64_IN2OUT_NEXT_DROP;
1129  b0->error =
1130  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1131  }
1132  goto trace0;
1133  }
1134 
1136  (vm, b0, l4_protocol0, l4_offset0, frag_hdr_offset0,
1137  &ctx0))
1138  {
1139  next0 = NAT64_IN2OUT_NEXT_DROP;
1140  b0->error =
1141  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1142  goto trace0;
1143  }
1144  }
1145  goto trace0;
1146  }
1147  else
1148  {
     /* Fast path: unknown protocols go to the slow-path node
        (next assignment at doxygen 1151 lost in extraction). */
1149  if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
1150  {
1152  goto trace0;
1153  }
1154  }
1155 
1156  if (proto0 == NAT_PROTOCOL_ICMP)
1157  {
1158  icmp_packets++;
1159  if (is_hairpinning (&ip60->dst_address))
1160  {
1163  (vm, b0, ip60, thread_index))
1164  {
1165  next0 = NAT64_IN2OUT_NEXT_DROP;
1166  b0->error =
1167  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1168  }
1169  goto trace0;
1170  }
1171 
1172  if (icmp6_to_icmp
1173  (vm, b0, nat64_in2out_icmp_set_cb, &ctx0,
1175  {
1176  next0 = NAT64_IN2OUT_NEXT_DROP;
1177  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1178  goto trace0;
1179  }
1180  }
1181  else if (proto0 == NAT_PROTOCOL_TCP || proto0 == NAT_PROTOCOL_UDP)
1182  {
1183  if (proto0 == NAT_PROTOCOL_TCP)
1184  tcp_packets++;
1185  else
1186  udp_packets++;
1187 
1188  if (is_hairpinning (&ip60->dst_address))
1189  {
1192  (vm, b0, ip60, l4_offset0, thread_index))
1193  {
1194  next0 = NAT64_IN2OUT_NEXT_DROP;
1195  b0->error =
1196  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1197  }
1198  goto trace0;
1199  }
1200 
1202  (vm, b0, l4_offset0, frag_hdr_offset0, &ctx0))
1203  {
1204  next0 = NAT64_IN2OUT_NEXT_DROP;
1205  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1206  goto trace0;
1207  }
1208  }
1209 
1210  trace0:
1212  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1213  {
1215  vlib_add_trace (vm, node, b0, sizeof (*t));
1216  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1217  t->next_index = next0;
1218  t->is_slow_path = is_slow_path;
1219  }
1220 
1221  pkts_processed += next0 == NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1222 
1223  /* verify speculative enqueue, maybe switch current next frame */
1224  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1225  n_left_to_next, bi0, next0);
1226  }
1227  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1228  }
     /* Flush per-frame counters into the stats node. */
1229  vlib_node_increment_counter (vm, stats_node_index,
1230  NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1231  pkts_processed);
1232  vlib_node_increment_counter (vm, stats_node_index,
1233  NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
1234  vlib_node_increment_counter (vm, stats_node_index,
1235  NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
1236  vlib_node_increment_counter (vm, stats_node_index,
1237  NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
1238  vlib_node_increment_counter (vm, stats_node_index,
1239  NAT64_IN2OUT_ERROR_OTHER_PACKETS,
1240  other_packets);
1241  vlib_node_increment_counter (vm, stats_node_index,
1242  NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
1243 
1244  return frame->n_vectors;
1245 }
1246 
1249  vlib_frame_t * frame)
1250 {
1251  return nat64_in2out_node_fn_inline (vm, node, frame, 0);
1252 }
1253 
1254 /* *INDENT-OFF* */
1256  .name = "nat64-in2out",
1257  .vector_size = sizeof (u32),
1258  .format_trace = format_nat64_in2out_trace,
1260  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1261  .error_strings = nat64_in2out_error_strings,
1262  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1263  /* edit / add dispositions here */
1264  .next_nodes = {
1265  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1266  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1267  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1268  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1269  },
1270 };
1271 /* *INDENT-ON* */
1272 
1275  vlib_frame_t * frame)
1276 {
1277  return nat64_in2out_node_fn_inline (vm, node, frame, 1);
1278 }
1279 
1280 /* *INDENT-OFF* */
1282  .name = "nat64-in2out-slowpath",
1283  .vector_size = sizeof (u32),
1284  .format_trace = format_nat64_in2out_trace,
1286  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1287  .error_strings = nat64_in2out_error_strings,
1288  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1289  /* edit / add dispositions here */
1290  .next_nodes = {
1291  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1292  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1293  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1294  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1295  },
1296 };
1297 /* *INDENT-ON* */
1298 
1300 {
1308 
1309 
 /* X-macro list of error/counter names for the NAT64 in2out worker-handoff
  * node.  Each entry expands to an enum member and a counter string:
  *   CONGESTION_DROP - packet dropped because the destination worker's
  *                     frame queue was full (n_enq < frame->n_vectors),
  *   SAME_WORKER     - packet already belonged to the current thread,
  *   DO_HANDOFF      - packet was handed off to a different worker thread. */
 1310 #define foreach_nat64_in2out_handoff_error \
 1311 _(CONGESTION_DROP, "congestion drop") \
 1312 _(SAME_WORKER, "same worker") \
 1313 _(DO_HANDOFF, "do handoff")
1314 
1315 typedef enum
1316 {
1317 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1319 #undef _
1322 
1324 #define _(sym,string) string,
1326 #undef _
1327 };
1328 
1329 typedef struct
1330 {
1333 
1334 static u8 *
1336 {
1337  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1338  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1340  va_arg (*args, nat64_in2out_handoff_trace_t *);
1341 
1342  s =
1343  format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
1344 
1345  return s;
1346 }
1347 
1350  vlib_frame_t * frame)
1351 {
1352  nat64_main_t *nm = &nat64_main;
1353  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1354  u32 n_enq, n_left_from, *from;
1355  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1356  u32 fq_index;
1358  u32 do_handoff = 0, same_worker = 0;
1359 
1360  from = vlib_frame_vector_args (frame);
1361  n_left_from = frame->n_vectors;
1362  vlib_get_buffers (vm, from, bufs, n_left_from);
1363 
1364  b = bufs;
1365  ti = thread_indices;
1366 
1367  fq_index = nm->fq_in2out_index;
1368 
1369  while (n_left_from > 0)
1370  {
1371  ip6_header_t *ip0;
1372 
1373  ip0 = vlib_buffer_get_current (b[0]);
1374  ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1375 
1376  if (ti[0] != thread_index)
1377  do_handoff++;
1378  else
1379  same_worker++;
1380 
1381  if (PREDICT_FALSE
1382  ((node->flags & VLIB_NODE_FLAG_TRACE)
1383  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1384  {
1386  vlib_add_trace (vm, node, b[0], sizeof (*t));
1387  t->next_worker_index = ti[0];
1388  }
1389 
1390  n_left_from -= 1;
1391  ti += 1;
1392  b += 1;
1393  }
1394 
1395  n_enq =
1396  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1397  frame->n_vectors, 1);
1398 
1399  if (n_enq < frame->n_vectors)
1400  vlib_node_increment_counter (vm, node->node_index,
1401  NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1402  frame->n_vectors - n_enq);
1403  vlib_node_increment_counter (vm, node->node_index,
1404  NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1405  same_worker);
1406  vlib_node_increment_counter (vm, node->node_index,
1407  NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1408  do_handoff);
1409 
1410  return frame->n_vectors;
1411 }
1412 
1413 /* *INDENT-OFF* */
1415  .name = "nat64-in2out-handoff",
1416  .vector_size = sizeof (u32),
1417  .format_trace = format_nat64_in2out_handoff_trace,
1420  .error_strings = nat64_in2out_handoff_error_strings,
1421 
1422  .n_next_nodes = 1,
1423 
1424  .next_nodes = {
1425  [0] = "error-drop",
1426  },
1427 };
1428 /* *INDENT-ON* */
1429 
1430 /*
1431  * fd.io coding-style-patch-verification: ON
1432  *
1433  * Local Variables:
1434  * eval: (c-set-style "gnu")
1435  * End:
1436  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
nat64_db_t * db
BIB and session DB per thread.
Definition: nat64.h:83
static int unk_proto_st_walk(nat64_db_st_entry_t *ste, void *arg)
Definition: nat64_in2out.c:478
int nat64_alloc_out_addr_and_port(u32 fib_index, nat_protocol_t proto, ip4_address_t *addr, u16 *port, u32 thread_index)
Allocate IPv4 address and port pair from NAT64 pool.
Definition: nat64.c:527
#define CLIB_UNUSED(x)
Definition: clib.h:86
ip4_address_t src_address
Definition: ip4_packet.h:170
snat_address_t * addr_pool
Address pool vector.
Definition: nat64.h:74
static int nat64_in2out_tcp_udp(vlib_main_t *vm, vlib_buffer_t *p, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *ctx)
Definition: nat64_in2out.c:140
void nat64_extract_ip4(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Extract IPv4 address from the IPv4-embedded IPv6 addresses.
Definition: nat64.c:1107
#define PREDICT_TRUE(x)
Definition: clib.h:119
vlib_node_registration_t nat64_in2out_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_node)
static void * ip_interface_address_get_address(ip_lookup_main_t *lm, ip_interface_address_t *a)
Definition: ip_interface.h:43
static int nat64_in2out_tcp_udp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 l4_offset, u32 thread_index)
Definition: nat64_in2out.c:660
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
nat64_in2out_next_t
Definition: nat64_in2out.c:78
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
Definition: fib_table.c:989
u32 thread_index
Definition: main.h:218
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
nat64_db_bib_entry_t * nat64_db_bib_entry_find(nat64_db_t *db, ip46_address_t *addr, u16 port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 BIB entry.
Definition: nat64_db.c:209
uword ip_csum_t
Definition: ip_packet.h:244
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
Definition: ip_packet.h:247
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u16 flags_and_fragment_offset
Definition: ip4_packet.h:151
#define VLIB_NODE_FN(node)
Definition: node.h:202
u32 in2out_node_index
Definition: nat64.h:116
static u8 * format_nat64_in2out_trace(u8 *s, va_list *args)
Definition: nat64_in2out.c:33
nat64_db_bib_t bib
Definition: nat64_db.h:138
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:472
vlib_node_registration_t nat64_in2out_handoff_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_handoff_node)
struct _tcp_header tcp_header_t
vhost_vring_addr_t addr
Definition: vhost_user.h:254
ip6_address_t src_address
Definition: ip6_packet.h:310
unsigned char u8
Definition: types.h:56
u32 st_entries_num
Definition: nat64_db.h:123
#define u8_ptr_add(ptr, index)
Definition: ip_types.h:43
u32 nat64_get_worker_in2out(ip6_address_t *addr)
Get worker thread index for NAT64 in2out.
Definition: nat64.c:105
#define static_always_inline
Definition: clib.h:106
static nat_protocol_t ip_proto_to_nat_proto(u8 ip_proto)
Common NAT inline functions.
Definition: inlines.h:22
vl_api_interface_index_t sw_if_index
Definition: gre.api:53
nat64_db_st_entry_t * nat64_db_st_entry_create(u32 thread_index, nat64_db_t *db, nat64_db_bib_entry_t *bibe, ip6_address_t *in_r_addr, ip4_address_t *out_r_addr, u16 r_port)
Create new NAT64 session table entry.
Definition: nat64_db.c:376
vl_api_ip6_address_t ip6
Definition: one.api:424
ip4_address_t dst_address
Definition: ip4_packet.h:170
static int nat64_in2out_unk_proto(vlib_main_t *vm, vlib_buffer_t *p, u8 l4_protocol, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *s_ctx)
Definition: nat64_in2out.c:513
snat_main_t * sm
Definition: nat64.h:122
static_always_inline u8 ip6_translate_tos(u32 ip_version_traffic_class_and_flow_label)
Translate TOS value from IPv6 to IPv4.
Definition: ip6_to_ip4.h:332
unsigned int u32
Definition: types.h:88
struct unk_proto_st_walk_ctx_t_ unk_proto_st_walk_ctx_t
void nat64_tcp_session_set_state(nat64_db_st_entry_t *ste, tcp_header_t *tcp, u8 is_ip6)
Set NAT64 TCP session state.
Definition: nat64.c:907
#define VLIB_FRAME_SIZE
Definition: node.h:380
#define frag_id_6to4(id)
Definition: ip6_to_ip4.h:49
u32 in2out_slowpath_node_index
Definition: nat64.h:117
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define ip6_frag_hdr_more(hdr)
Definition: ip6_packet.h:670
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static void mss_clamping(snat_main_t *sm, tcp_header_t *tcp, ip_csum_t *sum)
Definition: nat_inlines.h:635
void nat64_session_reset_timeout(nat64_db_st_entry_t *ste, vlib_main_t *vm)
Reset NAT64 session timeout.
Definition: nat64.c:868
vl_api_ip_proto_t proto
Definition: acl_types.api:50
long ctx[MAX_CONNS]
Definition: main.c:144
unsigned short u16
Definition: types.h:57
vlib_buffer_t * b
Definition: nat64_in2out.c:89
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
Definition: ip46_cli.c:60
nat64_db_st_t st
Definition: nat64_db.h:139
#define PREDICT_FALSE(x)
Definition: clib.h:118
vl_api_ip4_address_t ip4
Definition: one.api:376
static int nat64_in2out_inner_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
Definition: nat64_in2out.c:385
ip6_main_t ip6_main
Definition: ip6_forward.c:2784
static void vlib_set_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 value)
Set a simple counter.
Definition: counter.h:94
static char * nat64_in2out_error_strings[]
Definition: nat64_in2out.c:72
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
void nat64_compose_ip6(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Compose IPv4-embedded IPv6 addresses.
Definition: nat64.c:1037
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
vlib_simple_counter_main_t total_sessions
Definition: nat64.h:111
static u8 * format_nat64_in2out_handoff_trace(u8 *s, va_list *args)
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
Definition: ip4_packet.h:152
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static int nat64_in2out_unk_proto_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
Definition: nat64_in2out.c:920
static int nat64_in2out_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
Definition: nat64_in2out.c:292
u16 n_vectors
Definition: node.h:399
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
vlib_node_registration_t nat64_in2out_slowpath_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_slowpath_node)
#define foreach_nat64_in2out_handoff_error
#define ARRAY_LEN(x)
Definition: clib.h:66
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
nat64_main_t nat64_main
Definition: nat64.c:29
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1599
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:371
nat64_db_bib_entry_t * nat64_db_bib_entry_create(u32 thread_index, nat64_db_t *db, ip6_address_t *in_addr, ip4_address_t *out_addr, u16 in_port, u16 out_port, u32 fib_index, u8 proto, u8 is_static)
Create new NAT64 BIB entry.
Definition: nat64_db.c:53
#define ip6_frag_hdr_offset(hdr)
Definition: ip6_packet.h:664
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:517
nat64_db_st_entry_t * nat64_db_st_entry_find(nat64_db_t *db, ip46_address_t *l_addr, ip46_address_t *r_addr, u16 l_port, u16 r_port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 session table entry.
Definition: nat64_db.c:557
nat64_in2out_handoff_error_t
nat64_in2out_error_t
Definition: nat64_in2out.c:64
void nat64_db_st_walk(nat64_db_t *db, u8 proto, nat64_db_st_walk_fn_t fn, void *ctx)
Walk NAT64 session table.
Definition: nat64_db.c:325
static int nat64_in2out_icmp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
Definition: nat64_in2out.c:794
ip_lookup_main_t lookup_main
Definition: ip6.h:181
u32 fq_in2out_index
Worker handoff.
Definition: nat64.h:86
ip_dscp_t tos
Definition: ip4_packet.h:141
static u8 nat64_not_translate(u32 sw_if_index, ip6_address_t ip6_addr)
Definition: nat64_in2out.c:95
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
Definition: ip_packet.h:272
nat64_db_bib_entry_t * nat64_db_bib_entry_by_index(nat64_db_t *db, u8 proto, u32 bibe_index)
Get BIB entry by index and protocol.
Definition: nat64_db.c:302
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
u32 bib_entries_num
Definition: nat64_db.h:73
IPv6 to IPv4 translation.
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:167
ip4_address_t addr
Definition: nat.h:314
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
NAT64 global declarations.
u32 ip_version_traffic_class_and_flow_label
Definition: ip6_packet.h:297
u16 payload_length
Definition: ip6_packet.h:301
vl_api_address_t ip
Definition: l2.api:501
static char * nat64_in2out_handoff_error_strings[]
static int icmp6_to_icmp(vlib_main_t *vm, vlib_buffer_t *p, ip6_to_ip4_icmp_set_fn_t fn, void *ctx, ip6_to_ip4_icmp_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP6 packet to ICMP4.
Definition: ip6_to_ip4.h:350
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define foreach_ip_interface_address(lm, a, sw_if_index, loop, body)
Definition: ip_interface.h:57
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:494
static uword nat64_in2out_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_slow_path)
#define vnet_buffer(b)
Definition: buffer.h:417
static_always_inline int is_hairpinning(ip6_address_t *dst_addr)
Check whether is a hairpinning.
Definition: nat64_in2out.c:125
#define vec_foreach(var, vec)
Vector iterator.
#define IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS
Definition: ip4_packet.h:194
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1600
u16 flags
Copy of main node flags.
Definition: node.h:511
#define u16_net_add(u, val)
Definition: ip_types.h:44
static_always_inline int ip6_parse(vlib_main_t *vm, vlib_buffer_t *b, const ip6_header_t *ip6, u32 buff_len, u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
Parse some useful information from IPv6 header.
Definition: ip6_to_ip4.h:65
u8 ip_version_and_header_length
Definition: ip4_packet.h:138
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:304
#define foreach_nat64_in2out_error
Definition: nat64_in2out.c:49
struct nat64_in2out_set_ctx_t_ nat64_in2out_set_ctx_t
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
Definition: ip_packet.h:318
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:247
static u16 ip_csum_fold(ip_csum_t c)
Definition: ip_packet.h:300
struct nat64_in2out_frag_set_ctx_t_ nat64_in2out_frag_set_ctx_t
Definition: defs.h:46
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
Definition: ip_packet.h:255
ip6_address_t dst_address
Definition: ip6_packet.h:310
vlib_simple_counter_main_t total_bibs
Definition: nat64.h:110