FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
nat64_out2in.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv4 to IPv6 translation (outside to inside network)
18  */
19 
20 #include <nat/nat64.h>
21 #include <nat/nat_inlines.h>
22 #include <vnet/ip/ip4_to_ip6.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vnet/udp/udp.h>
25 
26 typedef struct
27 {
28   u32 sw_if_index;
29   u32 next_index;
30 } nat64_out2in_trace_t;
31 
32 static u8 *
33 format_nat64_out2in_trace (u8 * s, va_list * args)
34 {
35  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37  nat64_out2in_trace_t *t = va_arg (*args, nat64_out2in_trace_t *);
38 
39  s =
40  format (s, "NAT64-out2in: sw_if_index %d, next index %d", t->sw_if_index,
41  t->next_index);
42 
43  return s;
44 }
45 
46 #define foreach_nat64_out2in_error \
47 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
48 _(NO_TRANSLATION, "no translation") \
49 _(UNKNOWN, "unknown")
50 
51 typedef enum
52 {
53 #define _(sym,str) NAT64_OUT2IN_ERROR_##sym,
54   foreach_nat64_out2in_error
55 #undef _
56   NAT64_OUT2IN_N_ERROR,
57 } nat64_out2in_error_t;
58 
59 static char *nat64_out2in_error_strings[] = {
60 #define _(sym,string) string,
61   foreach_nat64_out2in_error
62 #undef _
63 };
64 
65 typedef enum
66 {
67   NAT64_OUT2IN_NEXT_IP6_LOOKUP,
68   NAT64_OUT2IN_NEXT_IP4_LOOKUP,
69   NAT64_OUT2IN_NEXT_DROP,
70   NAT64_OUT2IN_N_NEXT,
71 } nat64_out2in_next_t;
72 
73 typedef struct nat64_out2in_set_ctx_t_
74 {
75   vlib_buffer_t *b;
76   vlib_main_t *vm;
77   u32 thread_index;
78 } nat64_out2in_set_ctx_t;
79 
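/*
 * Translate an outside IPv4 TCP/UDP packet to IPv6 in place: find (or
 * create) the matching BIB/session entries, rebuild the IP header
 * (restoring a fragment header when needed), rewrite addresses and the
 * destination port, and patch the L4 checksum incrementally.
 */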
80 static int
81 nat64_out2in_tcp_udp (vlib_main_t * vm, vlib_buffer_t * b,
82                       nat64_out2in_set_ctx_t * ctx)
83 {
84   ip4_header_t *ip4;
85   ip6_header_t *ip6;
86  ip_csum_t csum;
87  u16 *checksum = NULL;
88  ip6_frag_hdr_t *frag;
89  u32 frag_id;
90  ip4_address_t old_src, old_dst;
91 
92  nat64_main_t *nm = &nat64_main;
93  nat64_db_bib_entry_t *bibe;
94  nat64_db_st_entry_t *ste;
95  ip46_address_t saddr;
96  ip46_address_t daddr;
97  ip6_address_t ip6_saddr;
98  u8 proto = vnet_buffer (b)->ip.reass.ip_proto;
99  u16 dport = vnet_buffer (b)->ip.reass.l4_dst_port;
100  u16 sport = vnet_buffer (b)->ip.reass.l4_src_port;
101  u32 sw_if_index, fib_index;
102  nat64_db_t *db = &nm->db[ctx->thread_index];
103 
104  ip4 = vlib_buffer_get_current (b);
105 
106  udp_header_t *udp = ip4_next_header (ip4);
107  tcp_header_t *tcp = ip4_next_header (ip4);
108  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
109  {
110  if (ip4->protocol == IP_PROTOCOL_UDP)
111  {
112  checksum = &udp->checksum;
113  //UDP checksum is optional over IPv4 but mandatory for IPv6
114  //We do not check udp->length sanity but use our safe computed value instead
115  if (PREDICT_FALSE (!*checksum))
116  {
117  u16 udp_len =
118  clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
119  csum = ip_incremental_checksum (0, udp, udp_len);
120  csum =
121  ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
122  csum =
123  ip_csum_with_carry (csum,
124  clib_host_to_net_u16 (IP_PROTOCOL_UDP));
125  csum =
126  ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
127  *checksum = ~ip_csum_fold (csum);
128  }
129  }
130  else
131  {
132  checksum = &tcp->checksum;
133  }
134  }
135 
136  old_src.as_u32 = ip4->src_address.as_u32;
137  old_dst.as_u32 = ip4->dst_address.as_u32;
138 
139  // Deal with fragmented packets
140  u16 frag_offset = ip4_get_fragment_offset (ip4);
141  if (PREDICT_FALSE (ip4_get_fragment_more (ip4) || frag_offset))
142  {
143  ip6 =
144  (ip6_header_t *) u8_ptr_add (ip4,
145  sizeof (*ip4) - sizeof (*ip6) -
146  sizeof (*frag));
147  frag =
148  (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
149  frag_id = frag_id_4to6 (ip4->fragment_id);
150  vlib_buffer_advance (b, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
151  }
152  else
153  {
154  ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
155  vlib_buffer_advance (b, sizeof (*ip4) - sizeof (*ip6));
156  frag = NULL;
157  }
158 
159   ip6->ip_version_traffic_class_and_flow_label =
160     clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
161  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
162  ip6->hop_limit = ip4->ttl;
163  ip6->protocol = ip4->protocol;
164 
165  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
166  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
167 
168  clib_memset (&saddr, 0, sizeof (saddr));
169  saddr.ip4.as_u32 = ip4->src_address.as_u32;
170  clib_memset (&daddr, 0, sizeof (daddr));
171  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
172 
173  ste =
174  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
175  fib_index, 0);
176  if (ste)
177  {
178  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
179  if (!bibe)
180  return -1;
181  }
182  else
183  {
184  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, fib_index, 0);
185 
186  if (!bibe)
187  return -1;
188 
189  nat64_compose_ip6 (&ip6_saddr, &old_src, bibe->fib_index);
190  ste =
191  nat64_db_st_entry_create (ctx->thread_index, db, bibe, &ip6_saddr,
192  &saddr.ip4, sport);
193 
194  if (!ste)
195  return -1;
196 
197       vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
198                                db->st.st_entries_num);
199  }
200 
201  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
202  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
203 
204  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
205  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
206 
207  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
208 
209  nat64_session_reset_timeout (ste, ctx->vm);
210 
211  if (PREDICT_FALSE (frag != NULL))
212  {
213  frag->next_hdr = ip6->protocol;
214  frag->identification = frag_id;
215  frag->rsv = 0;
216  frag->fragment_offset_and_more =
217  ip6_frag_hdr_offset_and_more (frag_offset, 1);
218  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
219  ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
220  }
221 
222  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
223  {
224  udp->dst_port = bibe->in_port;
225 
226  if (proto == IP_PROTOCOL_TCP)
227  {
228  nat64_tcp_session_set_state (ste, tcp, 0);
229  }
230 
231  csum = ip_csum_sub_even (*checksum, dport);
232  csum = ip_csum_add_even (csum, udp->dst_port);
233  csum = ip_csum_sub_even (csum, old_src.as_u32);
234  csum = ip_csum_sub_even (csum, old_dst.as_u32);
235  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
236  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
237  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
238  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
239  *checksum = ip_csum_fold (csum);
240  }
241 
242  return 0;
243 }
244 
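/*
 * icmp_to_icmp6() callback for the outer IP header of an ICMP packet:
 * echo request/reply are matched against the ICMP BIB/session tables,
 * while for error messages the IPv6 addresses are derived from the
 * embedded (inner) packet.
 */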
245 static int
246 nat64_out2in_icmp_set_cb (vlib_buffer_t * b, ip4_header_t * ip4,
247                           ip6_header_t * ip6, void *arg)
248 {
249  nat64_main_t *nm = &nat64_main;
250   nat64_out2in_set_ctx_t *ctx = arg;
251   nat64_db_bib_entry_t *bibe;
252  nat64_db_st_entry_t *ste;
253  ip46_address_t saddr, daddr;
254  ip6_address_t ip6_saddr;
255  u32 sw_if_index, fib_index;
256  icmp46_header_t *icmp = ip4_next_header (ip4);
257  nat64_db_t *db = &nm->db[ctx->thread_index];
258 
259  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
260  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
261 
262  clib_memset (&saddr, 0, sizeof (saddr));
263  saddr.ip4.as_u32 = ip4->src_address.as_u32;
264  clib_memset (&daddr, 0, sizeof (daddr));
265  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
266 
267  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
268  {
269  u16 out_id = ((u16 *) (icmp))[2];
270  ste =
271  nat64_db_st_entry_find (db, &daddr, &saddr, out_id, 0,
272  IP_PROTOCOL_ICMP, fib_index, 0);
273 
274  if (ste)
275  {
276  bibe =
277  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
278  ste->bibe_index);
279  if (!bibe)
280  return -1;
281  }
282  else
283  {
284  bibe =
285  nat64_db_bib_entry_find (db, &daddr, out_id,
286  IP_PROTOCOL_ICMP, fib_index, 0);
287  if (!bibe)
288  return -1;
289 
290  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
291  ste =
292             nat64_db_st_entry_create (ctx->thread_index, db,
293                                       bibe, &ip6_saddr, &saddr.ip4, 0);
294 
295  if (!ste)
296  return -1;
297 
298           vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
299                                    db->st.st_entries_num);
300  }
301 
302  nat64_session_reset_timeout (ste, ctx->vm);
303 
304  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
305  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
306 
307  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
308  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
309  ((u16 *) (icmp))[2] = bibe->in_port;
310 
311  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
312  }
313  else
314  {
315  ip6_header_t *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
316 
317       nat64_compose_ip6 (&ip6->src_address, &ip4->src_address,
318                          vnet_buffer (ctx->b)->sw_if_index[VLIB_TX]);
319  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
320  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
321  }
322 
323  return 0;
324 }
325 
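/*
 * icmp_to_icmp6() callback for the packet embedded in an ICMP error
 * message: the lookup direction is reversed and the original inside
 * IPv6 addresses and ports are restored.
 */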
326 static int
327 nat64_out2in_inner_icmp_set_cb (vlib_buffer_t * b, ip4_header_t * ip4,
328                                 ip6_header_t * ip6, void *arg)
329 {
330  nat64_main_t *nm = &nat64_main;
331   nat64_out2in_set_ctx_t *ctx = arg;
332   nat64_db_bib_entry_t *bibe;
333  nat64_db_st_entry_t *ste;
334  ip46_address_t saddr, daddr;
335  u32 sw_if_index, fib_index;
336  u8 proto = ip4->protocol;
337  nat64_db_t *db = &nm->db[ctx->thread_index];
338 
339  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
340  fib_index =
341     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
342 
343  clib_memset (&saddr, 0, sizeof (saddr));
344  saddr.ip4.as_u32 = ip4->src_address.as_u32;
345  clib_memset (&daddr, 0, sizeof (daddr));
346  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
347 
348  if (proto == IP_PROTOCOL_ICMP6)
349  {
350  icmp46_header_t *icmp = ip4_next_header (ip4);
351  u16 out_id = ((u16 *) (icmp))[2];
352  proto = IP_PROTOCOL_ICMP;
353 
354  if (!
355  (icmp->type == ICMP6_echo_request
356  || icmp->type == ICMP6_echo_reply))
357  return -1;
358 
359  ste =
360  nat64_db_st_entry_find (db, &saddr, &daddr, out_id, 0, proto,
361  fib_index, 0);
362  if (!ste)
363  return -1;
364 
365  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
366  if (!bibe)
367  return -1;
368 
369  ip6->dst_address.as_u64[0] = ste->in_r_addr.as_u64[0];
370  ip6->dst_address.as_u64[1] = ste->in_r_addr.as_u64[1];
371  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
372  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
373  ((u16 *) (icmp))[2] = bibe->in_port;
374 
375  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
376  }
377  else
378  {
379  udp_header_t *udp = ip4_next_header (ip4);
380  tcp_header_t *tcp = ip4_next_header (ip4);
381  u16 dport = udp->dst_port;
382  u16 sport = udp->src_port;
383  u16 *checksum;
384  ip_csum_t csum;
385 
386  ste =
387  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
388  fib_index, 0);
389  if (!ste)
390  return -1;
391 
392  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
393  if (!bibe)
394  return -1;
395 
396  nat64_compose_ip6 (&ip6->dst_address, &daddr.ip4, bibe->fib_index);
397  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
398  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
399  udp->src_port = bibe->in_port;
400 
401  if (proto == IP_PROTOCOL_UDP)
402  checksum = &udp->checksum;
403  else
404  checksum = &tcp->checksum;
405  if (*checksum)
406  {
407  csum = ip_csum_sub_even (*checksum, sport);
408  csum = ip_csum_add_even (csum, udp->src_port);
409  *checksum = ip_csum_fold (csum);
410  }
411 
412  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
413  }
414 
415  return 0;
416 }
417 
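/*
 * Translate packets of protocols that carry no ports or ICMP ids
 * (the "unknown protocol" case): session lookup is done on addresses
 * only, with a zero port.
 */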
418 static int
419 nat64_out2in_unk_proto (vlib_main_t * vm, vlib_buffer_t * p,
420                         nat64_out2in_set_ctx_t * ctx)
421 {
422   ip4_header_t *ip4 = vlib_buffer_get_current (p);
423   ip6_header_t *ip6;
424  ip6_frag_hdr_t *frag;
425  u32 frag_id;
426 
427  nat64_main_t *nm = &nat64_main;
428  nat64_db_bib_entry_t *bibe;
429  nat64_db_st_entry_t *ste;
430  ip46_address_t saddr, daddr;
431  ip6_address_t ip6_saddr;
432  u32 sw_if_index, fib_index;
433  u8 proto = ip4->protocol;
434  nat64_db_t *db = &nm->db[ctx->thread_index];
435 
436  // Deal with fragmented packets
437  u16 frag_offset = ip4_get_fragment_offset (ip4);
438  if (PREDICT_FALSE (ip4_get_fragment_more (ip4) || frag_offset))
439  {
440  ip6 =
441  (ip6_header_t *) u8_ptr_add (ip4,
442  sizeof (*ip4) - sizeof (*ip6) -
443  sizeof (*frag));
444  frag =
445  (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
446  frag_id = frag_id_4to6 (ip4->fragment_id);
447  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
448  }
449  else
450  {
451  ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
452  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
453  frag = NULL;
454  }
455 
456   ip6->ip_version_traffic_class_and_flow_label =
457     clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
458  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
459  ip6->hop_limit = ip4->ttl;
460  ip6->protocol = ip4->protocol;
461 
462  if (PREDICT_FALSE (frag != NULL))
463  {
464  frag->next_hdr = ip6->protocol;
465  frag->identification = frag_id;
466  frag->rsv = 0;
467  frag->fragment_offset_and_more =
468  ip6_frag_hdr_offset_and_more (frag_offset, 1);
469  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
470  ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
471  }
472 
473  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
474  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
475 
476  clib_memset (&saddr, 0, sizeof (saddr));
477  saddr.ip4.as_u32 = ip4->src_address.as_u32;
478  clib_memset (&daddr, 0, sizeof (daddr));
479  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
480 
481  ste =
482  nat64_db_st_entry_find (db, &daddr, &saddr, 0, 0, proto, fib_index, 0);
483  if (ste)
484  {
485  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
486  if (!bibe)
487  return -1;
488  }
489  else
490  {
491  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, fib_index, 0);
492 
493  if (!bibe)
494  return -1;
495 
496  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
497  ste = nat64_db_st_entry_create (ctx->thread_index, db,
498  bibe, &ip6_saddr, &saddr.ip4, 0);
499 
500  if (!ste)
501  return -1;
502 
503       vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
504                                db->st.st_entries_num);
505  }
506 
507  nat64_session_reset_timeout (ste, ctx->vm);
508 
509  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
510  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
511 
512  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
513  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
514 
515  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
516 
517  return 0;
518 }
519 
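/*
 * nat64-out2in node: classify each packet by protocol, run the matching
 * translation helper and enqueue to ip6-lookup on success, to ip4-lookup
 * for DHCP replies destined to the outside interface, or to error-drop
 * when no translation exists.
 */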
520 VLIB_NODE_FN (nat64_out2in_node) (vlib_main_t * vm,
521                                   vlib_node_runtime_t * node,
522                                   vlib_frame_t * frame)
523 {
524  u32 n_left_from, *from, *to_next;
525  nat64_out2in_next_t next_index;
526  nat64_main_t *nm = &nat64_main;
527   u32 thread_index = vm->thread_index;
528 
529  from = vlib_frame_vector_args (frame);
530  n_left_from = frame->n_vectors;
531  next_index = node->cached_next_index;
532  while (n_left_from > 0)
533  {
534  u32 n_left_to_next;
535 
536  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
537 
538  while (n_left_from > 0 && n_left_to_next > 0)
539  {
540  u32 bi0;
541  vlib_buffer_t *b0;
542  u32 next0;
543  ip4_header_t *ip40;
544  u32 proto0;
545           nat64_out2in_set_ctx_t ctx0;
546           udp_header_t *udp0;
547  u32 sw_if_index0;
548 
549  /* speculatively enqueue b0 to the current next frame */
550  bi0 = from[0];
551  to_next[0] = bi0;
552  from += 1;
553  to_next += 1;
554  n_left_from -= 1;
555  n_left_to_next -= 1;
556 
557  b0 = vlib_get_buffer (vm, bi0);
558  ip40 = vlib_buffer_get_current (b0);
559 
560  ctx0.b = b0;
561  ctx0.vm = vm;
562  ctx0.thread_index = thread_index;
563 
564           next0 = NAT64_OUT2IN_NEXT_IP6_LOOKUP;
565 
566  proto0 = ip_proto_to_nat_proto (ip40->protocol);
567  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
568  if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
569  {
570  if (nat64_out2in_unk_proto (vm, b0, &ctx0))
571  {
572  next0 = NAT64_OUT2IN_NEXT_DROP;
573  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
574  }
575           vlib_increment_simple_counter (&nm->counters.out2in.other,
576                                          thread_index, sw_if_index0, 1);
577  goto trace0;
578  }
579 
580  if (proto0 == NAT_PROTOCOL_ICMP)
581  {
582               vlib_increment_simple_counter (&nm->counters.out2in.icmp,
583                                              thread_index, sw_if_index0, 1);
584  if (icmp_to_icmp6
585  (b0, nat64_out2in_icmp_set_cb, &ctx0,
586                    nat64_out2in_inner_icmp_set_cb, &ctx0))
587                 {
588  next0 = NAT64_OUT2IN_NEXT_DROP;
589  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
590  goto trace0;
591  }
592  }
593  else
594  {
595  if (proto0 == NAT_PROTOCOL_TCP)
596                 vlib_increment_simple_counter (&nm->counters.out2in.tcp,
597                                                thread_index, sw_if_index0, 1);
598  else
599                 vlib_increment_simple_counter (&nm->counters.out2in.udp,
600                                                thread_index, sw_if_index0, 1);
601 
602  if (nat64_out2in_tcp_udp (vm, b0, &ctx0))
603  {
604  udp0 = ip4_next_header (ip40);
605  /*
606  * Send DHCP packets to the ipv4 stack, or we won't
607  * be able to use dhcp client on the outside interface
608  */
609  if ((proto0 == NAT_PROTOCOL_UDP)
610  && (udp0->dst_port ==
611  clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_client)))
612  {
613                       next0 = NAT64_OUT2IN_NEXT_IP4_LOOKUP;
614                       goto trace0;
615  }
616  next0 = NAT64_OUT2IN_NEXT_DROP;
617  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
618  goto trace0;
619  }
620  }
621 
622  trace0:
623  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
624  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
625  {
626               nat64_out2in_trace_t *t =
627                 vlib_add_trace (vm, node, b0, sizeof (*t));
628  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
629  t->next_index = next0;
630  }
631 
632  if (next0 == NAT64_OUT2IN_NEXT_DROP)
633  {
634               vlib_increment_simple_counter (&nm->counters.out2in.drops,
635                                              thread_index, sw_if_index0, 1);
636  }
637 
638  /* verify speculative enqueue, maybe switch current next frame */
639  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
640  n_left_to_next, bi0, next0);
641  }
642  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
643  }
644  return frame->n_vectors;
645 }
646 
647 /* *INDENT-OFF* */
648 VLIB_REGISTER_NODE (nat64_out2in_node) = {
649   .name = "nat64-out2in",
650  .vector_size = sizeof (u32),
651  .format_trace = format_nat64_out2in_trace,
652   .type = VLIB_NODE_TYPE_INTERNAL,
653   .n_errors = ARRAY_LEN (nat64_out2in_error_strings),
654   .error_strings = nat64_out2in_error_strings,
655  .n_next_nodes = NAT64_OUT2IN_N_NEXT,
656  /* edit / add dispositions here */
657  .next_nodes = {
658  [NAT64_OUT2IN_NEXT_DROP] = "error-drop",
659  [NAT64_OUT2IN_NEXT_IP6_LOOKUP] = "ip6-lookup",
660  [NAT64_OUT2IN_NEXT_IP4_LOOKUP] = "ip4-lookup",
661  },
662 };
663 /* *INDENT-ON* */
664 
665 typedef struct nat64_out2in_frag_set_ctx_t_
666 {
667   vlib_main_t *vm;
668   vlib_buffer_t *b;
669   u32 sess_index;
670   u32 thread_index;
671   u8 proto;
672   u8 first_frag;
673 } nat64_out2in_frag_set_ctx_t;
674 
675 #define foreach_nat64_out2in_handoff_error \
676 _(CONGESTION_DROP, "congestion drop") \
677 _(SAME_WORKER, "same worker") \
678 _(DO_HANDOFF, "do handoff")
679 
680 typedef enum
681 {
682 #define _(sym,str) NAT64_OUT2IN_HANDOFF_ERROR_##sym,
683   foreach_nat64_out2in_handoff_error
684 #undef _
685   NAT64_OUT2IN_HANDOFF_N_ERROR,
686 } nat64_out2in_handoff_error_t;
687 
688 static char *nat64_out2in_handoff_error_strings[] = {
689 #define _(sym,string) string,
690   foreach_nat64_out2in_handoff_error
691 #undef _
692 };
693 
694 typedef struct
695 {
696   u32 next_worker_index;
697 } nat64_out2in_handoff_trace_t;
698 
699 static u8 *
700 format_nat64_out2in_handoff_trace (u8 * s, va_list * args)
701 {
702  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
703  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
704   nat64_out2in_handoff_trace_t *t =
705     va_arg (*args, nat64_out2in_handoff_trace_t *);
706 
707  s =
708  format (s, "NAT64-OUT2IN-HANDOFF: next-worker %d", t->next_worker_index);
709 
710  return s;
711 }
712 
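/*
 * nat64-out2in-handoff node: pick the worker thread owning the session
 * for each packet (nat64_get_worker_out2in) and hand buffers off to that
 * thread's frame queue, counting same-worker and handed-off packets.
 */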
713 VLIB_NODE_FN (nat64_out2in_handoff_node) (vlib_main_t * vm,
714                                           vlib_node_runtime_t * node,
715                                           vlib_frame_t * frame)
716 {
717  nat64_main_t *nm = &nat64_main;
718  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
719  u32 n_enq, n_left_from, *from;
720  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
721  u32 fq_index;
722   u32 thread_index = vm->thread_index;
723   u32 do_handoff = 0, same_worker = 0;
724 
725  from = vlib_frame_vector_args (frame);
726  n_left_from = frame->n_vectors;
727  vlib_get_buffers (vm, from, bufs, n_left_from);
728 
729  b = bufs;
730  ti = thread_indices;
731 
732  fq_index = nm->fq_out2in_index;
733 
734  while (n_left_from > 0)
735  {
736  ip4_header_t *ip0;
737 
738  ip0 = vlib_buffer_get_current (b[0]);
739  ti[0] = nat64_get_worker_out2in (b[0], ip0);
740 
741  if (ti[0] != thread_index)
742  do_handoff++;
743  else
744  same_worker++;
745 
746  if (PREDICT_FALSE
747  ((node->flags & VLIB_NODE_FLAG_TRACE)
748  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
749  {
750           nat64_out2in_handoff_trace_t *t =
751             vlib_add_trace (vm, node, b[0], sizeof (*t));
752  t->next_worker_index = ti[0];
753  }
754 
755  n_left_from -= 1;
756  ti += 1;
757  b += 1;
758  }
759 
760  n_enq =
761  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
762  frame->n_vectors, 1);
763 
764  if (n_enq < frame->n_vectors)
765  vlib_node_increment_counter (vm, node->node_index,
766  NAT64_OUT2IN_HANDOFF_ERROR_CONGESTION_DROP,
767  frame->n_vectors - n_enq);
768  vlib_node_increment_counter (vm, node->node_index,
769  NAT64_OUT2IN_HANDOFF_ERROR_SAME_WORKER,
770  same_worker);
771  vlib_node_increment_counter (vm, node->node_index,
772  NAT64_OUT2IN_HANDOFF_ERROR_DO_HANDOFF,
773  do_handoff);
774 
775  return frame->n_vectors;
776 }
777 
778 /* *INDENT-OFF* */
779 VLIB_REGISTER_NODE (nat64_out2in_handoff_node) = {
780   .name = "nat64-out2in-handoff",
781  .vector_size = sizeof (u32),
782  .format_trace = format_nat64_out2in_handoff_trace,
783   .type = VLIB_NODE_TYPE_INTERNAL,
784   .n_errors = ARRAY_LEN(nat64_out2in_handoff_error_strings),
785   .error_strings = nat64_out2in_handoff_error_strings,
786 
787  .n_next_nodes = 1,
788 
789  .next_nodes = {
790  [0] = "error-drop",
791  },
792 };
793 /* *INDENT-ON* */
794 
795 /*
796  * fd.io coding-style-patch-verification: ON
797  *
798  * Local Variables:
799  * eval: (c-set-style "gnu")
800  * End:
801  */
Definition: ip6_packet.h:310