FD.io VPP  v19.08-27-gf4dcae4
Vector Packet Processing
node.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <lb/lb.h>
17 #include <vnet/fib/ip4_fib.h>
18 
19 #include <vnet/gre/packet.h>
20 #include <lb/lbhash.h>
21 
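/*
 * Error codes follow the usual VPP X-macro pattern: foreach_lb_error is
 * expanded once below to build the lb_error_t enum and once more to build
 * the matching lb_error_strings[] table, so the two always stay in sync.
 */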
22 #define foreach_lb_error \
23  _(NONE, "no error") \
24  _(PROTO_NOT_SUPPORTED, "protocol not supported")
25 
26 typedef enum
27 {
28 #define _(sym,str) LB_ERROR_##sym,
29  foreach_lb_error
30 #undef _
31  LB_N_ERROR,
32 } lb_error_t;
33 
34 static char *lb_error_strings[] =
35  {
36 #define _(sym,string) string,
37  foreach_lb_error
38 #undef _
39  };
40 
41 typedef struct
42 {
43  u32 vip_index;
44  u32 as_index;
45 } lb_trace_t;
46 
47 typedef struct
48 {
49  u32 vip_index;
50 
51  u32 node_port;
52 } lb_nodeport_trace_t;
53 
54 typedef struct
55 {
56  u32 vip_index;
57  u32 as_index;
58  u32 rx_sw_if_index;
59  u32 next_index;
60 } lb_nat_trace_t;
61 
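/*
 * Trace formatters. VIPs and ASs live in pools that may be modified between
 * the time a packet is traced and the time the trace is displayed, so each
 * formatter first checks pool_is_free_index() before dereferencing the
 * captured indices.
 */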
62 u8 *
63 format_lb_trace (u8 * s, va_list * args)
64 {
65  lb_main_t *lbm = &lb_main;
66  CLIB_UNUSED(vlib_main_t * vm)
67  = va_arg (*args, vlib_main_t *);
68  CLIB_UNUSED(vlib_node_t * node)
69  = va_arg (*args, vlib_node_t *);
70  lb_trace_t *t = va_arg (*args, lb_trace_t *);
71  if (pool_is_free_index(lbm->vips, t->vip_index))
72  {
73  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
74  }
75  else
76  {
77  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
78  &lbm->vips[t->vip_index]);
79  }
80  if (pool_is_free_index(lbm->ass, t->as_index))
81  {
82  s = format (s, "lb as[%d]: This AS was freed since capture\n", t->as_index);
83  }
84  else
85  {
86  s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
87  &lbm->ass[t->as_index]);
88  }
89  return s;
90 }
91 
92 u8 *
93 format_lb_nat_trace (u8 * s, va_list * args)
94 {
95  lb_main_t *lbm = &lb_main;
96  CLIB_UNUSED(vlib_main_t * vm)
97  = va_arg (*args, vlib_main_t *);
98  CLIB_UNUSED(vlib_node_t * node)
99  = va_arg (*args, vlib_node_t *);
100  lb_nat_trace_t *t = va_arg (*args, lb_nat_trace_t *);
101 
102  if (pool_is_free_index(lbm->vips, t->vip_index))
103  {
104  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
105  }
106  else
107  {
108  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
109  &lbm->vips[t->vip_index]);
110  }
111  if (pool_is_free_index(lbm->ass, t->as_index))
112  {
113  s = format (s, "lb as[%d]: This AS was freed since capture\n", t->as_index);
114  }
115  else
116  {
117  s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
118  &lbm->ass[t->as_index]);
119  }
120  s = format (s, "lb nat: rx_sw_if_index = %d, next_index = %d",
121  t->rx_sw_if_index, t->next_index);
122 
123  return s;
124 }
125 
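/*
 * Each worker thread owns a sticky hash table mapping flow hashes to AS
 * indices. The table is lazily (re)allocated here: if the configured
 * per_cpu_sticky_buckets no longer matches the existing table, every entry
 * is dereferenced and the table is rebuilt; the flow timeout is refreshed
 * from lbm->flow_timeout on every call.
 */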
126 lb_hash_t *
127 lb_get_sticky_table (u32 thread_index)
128 {
129  lb_main_t *lbm = &lb_main;
130  lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
131  //Check if size changed
132  if (PREDICT_FALSE(
133  sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
134  {
135  //Dereference everything in there
136  lb_hash_bucket_t *b;
137  u32 i;
138  lb_hash_foreach_entry(sticky_ht, b, i)
139  {
140  vlib_refcount_add (&lbm->as_refcount, thread_index, b->value[i], -1);
141  vlib_refcount_add (&lbm->as_refcount, thread_index, 0, 1);
142  }
143 
144  lb_hash_free (sticky_ht);
145  sticky_ht = NULL;
146  }
147 
148  //Create if necessary
149  if (PREDICT_FALSE(sticky_ht == NULL))
150  {
151  lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc (
152  lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
153  sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
154  clib_warning("Regenerated sticky table %p", sticky_ht);
155  }
156 
157  ASSERT(sticky_ht);
158 
159  //Update timeout
160  sticky_ht->timeout = lbm->flow_timeout;
161  return sticky_ht;
162 }
163 
164 u64
165 lb_node_get_other_ports4 (ip4_header_t *ip40)
166 {
167  return 0;
168 }
169 
170 u64
171 lb_node_get_other_ports6 (ip6_header_t *ip60)
172 {
173  return 0;
174 }
175 
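/*
 * Compute the flow hash and resolve the VIP index for one buffer. The ip4/ip6
 * lookup stage stores the VIP (or, for per-port VIPs, a prefix) index in
 * vnet_buffer(p)->ip.adj_index[VLIB_TX]. For TCP/UDP the hash covers the
 * address pair plus both ports; for per-port VIPs the (prefix, protocol,
 * port) key is then looked up in lbm->vip_index_per_port, falling back to
 * VIP 0 when no entry matches.
 */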
176 static_always_inline void
177 lb_node_get_hash (lb_main_t * lbm, vlib_buffer_t * p, u8 is_input_v4,
178  u32 *hash, u32 *vip_idx, u8 per_port_vip)
179 {
180  vip_port_key_t key;
181  clib_bihash_kv_8_8_t kv, value;
182 
183  /* For vip case, retrieve vip index for ip lookup */
184  *vip_idx = vnet_buffer (p)->ip.adj_index[VLIB_TX];
185 
186  if (per_port_vip)
187  {
188  /* For per-port-vip case, ip lookup stores dummy index */
189  key.vip_prefix_index = *vip_idx;
190  }
191 
192  if (is_input_v4)
193  {
194  ip4_header_t *ip40;
195  u64 ports;
196 
197  ip40 = vlib_buffer_get_current (p);
198  if (PREDICT_TRUE(
199  ip40->protocol == IP_PROTOCOL_TCP
200  || ip40->protocol == IP_PROTOCOL_UDP))
201  ports = ((u64) ((udp_header_t *) (ip40 + 1))->src_port << 16)
202  | ((u64) ((udp_header_t *) (ip40 + 1))->dst_port);
203  else
204  ports = lb_node_get_other_ports4 (ip40);
205 
206  *hash = lb_hash_hash (*((u64 *) &ip40->address_pair), ports, 0, 0, 0);
207 
208  if (per_port_vip)
209  {
210  key.protocol = ip40->protocol;
211  key.port = (u16)(ports & 0xFFFF);
212  }
213  }
214  else
215  {
216  ip6_header_t *ip60;
217  ip60 = vlib_buffer_get_current (p);
218  u64 ports;
219 
220  if (PREDICT_TRUE(
221  ip60->protocol == IP_PROTOCOL_TCP
222  || ip60->protocol == IP_PROTOCOL_UDP))
223  ports = ((u64) ((udp_header_t *) (ip60 + 1))->src_port << 16)
224  | ((u64) ((udp_header_t *) (ip60 + 1))->dst_port);
225  else
226  ports = lb_node_get_other_ports6 (ip60);
227 
228  *hash = lb_hash_hash (ip60->src_address.as_u64[0],
229  ip60->src_address.as_u64[1],
230  ip60->dst_address.as_u64[0],
231  ip60->dst_address.as_u64[1], ports);
232 
233  if (per_port_vip)
234  {
235  key.protocol = ip60->protocol;
236  key.port = (u16)(ports & 0xFFFF);
237  }
238  }
239 
240  /* For per-port-vip case, retrieve vip index for vip_port_filter table */
241  if (per_port_vip)
242  {
243  kv.key = key.as_u64;
244  if (clib_bihash_search_8_8(&lbm->vip_index_per_port, &kv, &value) < 0)
245  {
246  /* return default vip */
247  *vip_idx = 0;
248  return;
249  }
250  *vip_idx = value.value;
251  }
252 }
253 
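/*
 * Main load-balancer node body, specialized at compile time by address
 * family, encapsulation type and per-port-VIP mode. For each packet the flow
 * hash is looked up in the per-thread sticky table; a hit keeps the existing
 * AS, a miss picks an AS from the VIP's new_flow_table and installs a sticky
 * entry, and a full bucket leaves the flow untracked. The hash and VIP index
 * of the next packet are computed one iteration ahead so the bucket and the
 * next buffer can be prefetched.
 */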
254 static_always_inline uword
255 lb_node_fn (vlib_main_t * vm,
256  vlib_node_runtime_t * node,
257  vlib_frame_t * frame,
258  u8 is_input_v4, //Compile-time parameter stating whether the input is v4 (or v6)
259  lb_encap_type_t encap_type, //Compile-time parameter: GRE4/GRE6/L3DSR/NAT4/NAT6
260  u8 per_port_vip) //Compile-time parameter stating whether this is a per-port VIP
261 {
262  lb_main_t *lbm = &lb_main;
263  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
264  u32 thread_index = vm->thread_index;
265  u32 lb_time = lb_hash_time_now (vm);
266 
267  lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
268  from = vlib_frame_vector_args (frame);
269  n_left_from = frame->n_vectors;
270  next_index = node->cached_next_index;
271 
272  u32 nexthash0 = 0;
273  u32 next_vip_idx0 = ~0;
274  if (PREDICT_TRUE(n_left_from > 0))
275  {
276  vlib_buffer_t *p0 = vlib_get_buffer (vm, from[0]);
277  lb_node_get_hash (lbm, p0, is_input_v4, &nexthash0,
278  &next_vip_idx0, per_port_vip);
279  }
280 
281  while (n_left_from > 0)
282  {
283  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
284  while (n_left_from > 0 && n_left_to_next > 0)
285  {
286  u32 pi0;
287  vlib_buffer_t *p0;
288  lb_vip_t *vip0;
289  u32 asindex0 = 0;
290  u16 len0;
291  u32 available_index0;
292  u8 counter = 0;
293  u32 hash0 = nexthash0;
294  u32 vip_index0 = next_vip_idx0;
295  u32 next0;
296 
297  if (PREDICT_TRUE(n_left_from > 1))
298  {
299  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
300  //Compute next hash and prefetch bucket
301  lb_node_get_hash (lbm, p1, is_input_v4,
302  &nexthash0, &next_vip_idx0,
303  per_port_vip);
304  lb_hash_prefetch_bucket (sticky_ht, nexthash0);
305  //Prefetch for encap, next
306  CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
307  }
308 
309  if (PREDICT_TRUE(n_left_from > 2))
310  {
311  vlib_buffer_t *p2;
312  p2 = vlib_get_buffer (vm, from[2]);
313  /* prefetch packet header and data */
314  vlib_prefetch_buffer_header(p2, STORE);
315  CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
316  }
317 
318  pi0 = to_next[0] = from[0];
319  from += 1;
320  n_left_from -= 1;
321  to_next += 1;
322  n_left_to_next -= 1;
323 
324  p0 = vlib_get_buffer (vm, pi0);
325 
326  vip0 = pool_elt_at_index(lbm->vips, vip_index0);
327 
328  if (is_input_v4)
329  {
330  ip4_header_t *ip40;
331  ip40 = vlib_buffer_get_current (p0);
332  len0 = clib_net_to_host_u16 (ip40->length);
333  }
334  else
335  {
336  ip6_header_t *ip60;
337  ip60 = vlib_buffer_get_current (p0);
338  len0 = clib_net_to_host_u16 (ip60->payload_length)
339  + sizeof(ip6_header_t);
340  }
341 
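 /*
  * The same hash indexes both tables: the per-thread sticky table (keyed by
  * hash + vip_index + time) and, on a miss, the VIP's new_flow_table, where
  * (hash0 & new_flow_table_mask) selects the AS bucket.
  */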
342  lb_hash_get (sticky_ht, hash0,
343  vip_index0, lb_time,
344  &available_index0, &asindex0);
345 
346  if (PREDICT_TRUE(asindex0 != 0))
347  {
348  //Found an existing entry
349  counter = LB_VIP_COUNTER_NEXT_PACKET;
350  }
351  else if (PREDICT_TRUE(available_index0 != ~0))
352  {
353  //There is an available slot for a new flow
354  asindex0 =
355  vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
356  counter = LB_VIP_COUNTER_FIRST_PACKET;
357  counter = (asindex0 == 0) ? LB_VIP_COUNTER_NO_SERVER : counter;
358 
359  //TODO: There are race conditions with as0 and vip0 manipulation.
360  //Configuration may be changed, vectors resized, etc...
361 
362  //Dereference previously used
363  vlib_refcount_add (
364  &lbm->as_refcount, thread_index,
365  lb_hash_available_value (sticky_ht, hash0, available_index0),
366  -1);
367  vlib_refcount_add (&lbm->as_refcount, thread_index, asindex0, 1);
368 
369  //Add sticky entry
370  //Note that when no AS is configured, an entry is added anyway.
371  //But having no configured AS is not something that should happen.
372  lb_hash_put (sticky_ht, hash0, asindex0,
373  vip_index0,
374  available_index0, lb_time);
375  }
376  else
377  {
378  //Could not store new entry in the table
379  asindex0 =
380  vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
381  counter = LB_VIP_COUNTER_UNTRACKED_PACKET;
382  }
383 
384  vlib_increment_simple_counter (
385  &lbm->vip_counters[counter], thread_index,
386  vip_index0,
387  1);
388 
389  //Now let's encap
390  if ((encap_type == LB_ENCAP_TYPE_GRE4)
391  || (encap_type == LB_ENCAP_TYPE_GRE6))
392  {
393  gre_header_t *gre0;
394  if (encap_type == LB_ENCAP_TYPE_GRE4) /* encap GRE4*/
395  {
396  ip4_header_t *ip40;
397  vlib_buffer_advance (
398  p0, -sizeof(ip4_header_t) - sizeof(gre_header_t));
399  ip40 = vlib_buffer_get_current (p0);
400  gre0 = (gre_header_t *) (ip40 + 1);
401  ip40->src_address = lbm->ip4_src_address;
402  ip40->dst_address = lbm->ass[asindex0].address.ip4;
403  ip40->ip_version_and_header_length = 0x45;
404  ip40->ttl = 128;
405  ip40->fragment_id = 0;
406  ip40->flags_and_fragment_offset = 0;
407  ip40->length = clib_host_to_net_u16 (
408  len0 + sizeof(gre_header_t) + sizeof(ip4_header_t));
409  ip40->protocol = IP_PROTOCOL_GRE;
410  ip40->checksum = ip4_header_checksum (ip40);
411  }
412  else /* encap GRE6*/
413  {
414  ip6_header_t *ip60;
415  vlib_buffer_advance (
416  p0, -sizeof(ip6_header_t) - sizeof(gre_header_t));
417  ip60 = vlib_buffer_get_current (p0);
418  gre0 = (gre_header_t *) (ip60 + 1);
419  ip60->dst_address = lbm->ass[asindex0].address.ip6;
420  ip60->src_address = lbm->ip6_src_address;
421  ip60->hop_limit = 128;
422  ip60->ip_version_traffic_class_and_flow_label =
423  clib_host_to_net_u32 (0x6 << 28);
424  ip60->payload_length = clib_host_to_net_u16 (
425  len0 + sizeof(gre_header_t));
426  ip60->protocol = IP_PROTOCOL_GRE;
427  }
428 
429  gre0->flags_and_version = 0;
430  gre0->protocol =
431  (is_input_v4) ?
432  clib_host_to_net_u16 (0x0800) :
433  clib_host_to_net_u16 (0x86DD);
434  }
435  else if (encap_type == LB_ENCAP_TYPE_L3DSR) /* encap L3DSR*/
436  {
437  ip4_header_t *ip40;
438  tcp_header_t *th0;
439  ip_csum_t csum;
440  u32 old_dst, new_dst;
441  u8 old_tos, new_tos;
442 
443  ip40 = vlib_buffer_get_current (p0);
444  old_dst = ip40->dst_address.as_u32;
445  new_dst = lbm->ass[asindex0].address.ip4.as_u32;
446  ip40->dst_address.as_u32 = lbm->ass[asindex0].address.ip4.as_u32;
447  /* Get and rewrite DSCP bit */
448  old_tos = ip40->tos;
449  new_tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
450  ip40->tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
451 
452  csum = ip40->checksum;
453  csum = ip_csum_update (csum, old_tos, new_tos,
454  ip4_header_t,
455  tos /* changed member */);
456  csum = ip_csum_update (csum, old_dst, new_dst,
457  ip4_header_t,
458  dst_address /* changed member */);
459  ip40->checksum = ip_csum_fold (csum);
460 
461  /* Recomputing L4 checksum after dst-IP modifying */
462  th0 = ip4_next_header (ip40);
463  th0->checksum = 0;
464  th0->checksum = ip4_tcp_udp_compute_checksum (vm, p0, ip40);
465  }
466  else if ((encap_type == LB_ENCAP_TYPE_NAT4)
467  || (encap_type == LB_ENCAP_TYPE_NAT6))
468  {
469  ip_csum_t csum;
470  udp_header_t *uh;
471 
472  /* do NAT */
473  if ((is_input_v4 == 1) && (encap_type == LB_ENCAP_TYPE_NAT4))
474  {
475  /* NAT44 */
476  ip4_header_t *ip40;
477  u32 old_dst;
478  ip40 = vlib_buffer_get_current (p0);
479  uh = (udp_header_t *) (ip40 + 1);
480  old_dst = ip40->dst_address.as_u32;
481  ip40->dst_address = lbm->ass[asindex0].address.ip4;
482 
483  csum = ip40->checksum;
484  csum = ip_csum_sub_even (csum, old_dst);
485  csum = ip_csum_add_even (
486  csum, lbm->ass[asindex0].address.ip4.as_u32);
487  ip40->checksum = ip_csum_fold (csum);
488 
489  if (ip40->protocol == IP_PROTOCOL_UDP)
490  {
491  uh->dst_port = vip0->encap_args.target_port;
492  csum = uh->checksum;
493  csum = ip_csum_sub_even (csum, old_dst);
494  csum = ip_csum_add_even (
495  csum, lbm->ass[asindex0].address.ip4.as_u32);
496  uh->checksum = ip_csum_fold (csum);
497  }
498  else
499  {
500  asindex0 = 0;
501  }
502  }
503  else if ((is_input_v4 == 0) && (encap_type == LB_ENCAP_TYPE_NAT6))
504  {
505  /* NAT66 */
506  ip6_header_t *ip60;
507  ip6_address_t old_dst;
508 
509  ip60 = vlib_buffer_get_current (p0);
510  uh = (udp_header_t *) (ip60 + 1);
511 
512  old_dst.as_u64[0] = ip60->dst_address.as_u64[0];
513  old_dst.as_u64[1] = ip60->dst_address.as_u64[1];
514  ip60->dst_address.as_u64[0] =
515  lbm->ass[asindex0].address.ip6.as_u64[0];
516  ip60->dst_address.as_u64[1] =
517  lbm->ass[asindex0].address.ip6.as_u64[1];
518 
519  if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_UDP))
520  {
521  uh->dst_port = vip0->encap_args.target_port;
522  csum = uh->checksum;
523  csum = ip_csum_sub_even (csum, old_dst.as_u64[0]);
524  csum = ip_csum_sub_even (csum, old_dst.as_u64[1]);
525  csum = ip_csum_add_even (
526  csum, lbm->ass[asindex0].address.ip6.as_u64[0]);
527  csum = ip_csum_add_even (
528  csum, lbm->ass[asindex0].address.ip6.as_u64[1]);
529  uh->checksum = ip_csum_fold (csum);
530  }
531  else
532  {
533  asindex0 = 0;
534  }
535  }
536  }
537  next0 = lbm->ass[asindex0].dpo.dpoi_next_node;
538  //Note that this is going to error if asindex0 == 0
539  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
540  lbm->ass[asindex0].dpo.dpoi_index;
541 
542  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
543  {
544  lb_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof(*tr));
545  tr->as_index = asindex0;
546  tr->vip_index = vip_index0;
547  }
548 
549  //Enqueue to next
550  vlib_validate_buffer_enqueue_x1(
551  vm, node, next_index, to_next, n_left_to_next, pi0, next0);
552  }
553  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
554  }
555 
556  return frame->n_vectors;
557 }
558 
559 u8 *
560 format_nodeport_lb_trace (u8 * s, va_list * args)
561 {
562  lb_main_t *lbm = &lb_main;
563  CLIB_UNUSED(vlib_main_t * vm)
564  = va_arg (*args, vlib_main_t *);
565  CLIB_UNUSED(vlib_node_t * node)
566  = va_arg (*args, vlib_node_t *);
567  lb_nodeport_trace_t *t = va_arg (*args, lb_nodeport_trace_t *);
568  if (pool_is_free_index(lbm->vips, t->vip_index))
569  {
570  s = format (s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
571  }
572  else
573  {
574  s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
575  &lbm->vips[t->vip_index]);
576  }
577 
578  s = format (s, " lb node_port: %d", t->node_port);
579 
580  return s;
581 }
582 
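/*
 * NodePort entry node: the current data pointer is first moved back over the
 * IP and UDP headers, the UDP destination port is looked up in
 * lbm->vip_index_by_nodeport, and the resulting VIP index (or
 * ADJ_INDEX_INVALID) is stashed in adj_index[VLIB_TX] before the packet is
 * enqueued to lb4-nat4-port or lb6-nat6-port.
 */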
583 static uword
584 lb_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
585  vlib_frame_t * frame, u8 is_input_v4)
586 {
587  lb_main_t *lbm = &lb_main;
588  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
589 
590  from = vlib_frame_vector_args (frame);
591  n_left_from = frame->n_vectors;
592  next_index = node->cached_next_index;
593 
594  while (n_left_from > 0)
595  {
596  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
597 
598  while (n_left_from > 0 && n_left_to_next > 0)
599  {
600  u32 pi0;
601  vlib_buffer_t *p0;
602  udp_header_t * udp_0;
603  uword * entry0;
604 
605  if (PREDICT_TRUE(n_left_from > 1))
606  {
607  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
608  //Prefetch for encap, next
609  CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
610  }
611 
612  if (PREDICT_TRUE(n_left_from > 2))
613  {
614  vlib_buffer_t *p2;
615  p2 = vlib_get_buffer (vm, from[2]);
616  /* prefetch packet header and data */
617  vlib_prefetch_buffer_header(p2, STORE);
618  CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
619  }
620 
621  pi0 = to_next[0] = from[0];
622  from += 1;
623  n_left_from -= 1;
624  to_next += 1;
625  n_left_to_next -= 1;
626 
627  p0 = vlib_get_buffer (vm, pi0);
628 
629  if (is_input_v4)
630  {
631  ip4_header_t *ip40;
632  vlib_buffer_advance (
633  p0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
634  ip40 = vlib_buffer_get_current (p0);
635  udp_0 = (udp_header_t *) (ip40 + 1);
636  }
637  else
638  {
639  ip6_header_t *ip60;
640  vlib_buffer_advance (
641  p0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
642  ip60 = vlib_buffer_get_current (p0);
643  udp_0 = (udp_header_t *) (ip60 + 1);
644  }
645 
646  entry0 = hash_get_mem(lbm->vip_index_by_nodeport, &(udp_0->dst_port));
647 
648  //Enqueue to next
649  vnet_buffer(p0)->ip.adj_index[VLIB_TX] = entry0 ? entry0[0]
650  : ADJ_INDEX_INVALID;
651 
652  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
653  {
654  lb_nodeport_trace_t *tr = vlib_add_trace (vm, node, p0,
655  sizeof(*tr));
656  tr->vip_index = entry0 ? entry0[0] : ADJ_INDEX_INVALID;
657  tr->node_port = (u32) clib_net_to_host_u16 (udp_0->dst_port);
658  }
659 
660  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
661  n_left_to_next, pi0,
662  is_input_v4 ?
663  LB4_NODEPORT_NEXT_IP4_NAT4 : LB6_NODEPORT_NEXT_IP6_NAT6);
664  }
665  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
666  }
667 
668  return frame->n_vectors;
669 
670 }
671 
672 /**
673  * @brief Match NAT44 static mapping.
674  *
675  * @param lbm LB main.
676  * @param match Address and port to match.
677  * @param index index to the pool.
678  *
679  * @returns 0 if match found, otherwise 1.
680  */
681 int
682 lb_nat44_mapping_match (lb_main_t * lbm, lb_snat4_key_t * match, u32 * index)
683 {
684  clib_bihash_kv_8_8_t kv4, value;
685  clib_bihash_8_8_t *mapping_hash = &lbm->mapping_by_as4;
686 
687  kv4.key = match->as_u64;
688  kv4.value = 0;
689  if (clib_bihash_search_8_8 (mapping_hash, &kv4, &value))
690  {
691  return 1;
692  }
693 
694  *index = value.value;
695  return 0;
696 }
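/*
 * Illustrative use, mirroring lb_nat_in2out_node_fn below (field names as in
 * lb.h): build an lb_snat4_key_t from the packet's source tuple, then map it
 * to a static-mapping pool index.
 *
 *   lb_snat4_key_t key40;
 *   u32 index40;
 *   key40.addr = ip40->src_address;
 *   key40.protocol = lb_ip_proto_to_nat_proto (ip40->protocol);
 *   key40.port = udp0->src_port;
 *   key40.fib_index = rx_fib_index0;
 *   if (lb_nat44_mapping_match (lbm, &key40, &index40) == 0)
 *     sm40 = pool_elt_at_index (lbm->snat_mappings, index40);
 */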
697 
698 /**
699  * @brief Match NAT66 static mapping.
700  *
701  * @param lbm LB main.
702  * @param match Address and port to match.
703  * @param index index to the pool.
704  *
705  * @returns 0 if match found otherwise 1.
706  */
707 int
708 lb_nat66_mapping_match (lb_main_t * lbm, lb_snat6_key_t * match, u32 * index)
709 {
710  clib_bihash_kv_24_8_t kv6, value;
711  lb_snat6_key_t m_key6;
712  clib_bihash_24_8_t *mapping_hash = &lbm->mapping_by_as6;
713 
714  m_key6.addr.as_u64[0] = match->addr.as_u64[0];
715  m_key6.addr.as_u64[1] = match->addr.as_u64[1];
716  m_key6.port = match->port;
717  m_key6.protocol = 0;
718  m_key6.fib_index = 0;
719 
720  kv6.key[0] = m_key6.as_u64[0];
721  kv6.key[1] = m_key6.as_u64[1];
722  kv6.key[2] = m_key6.as_u64[2];
723  kv6.value = 0;
724  if (clib_bihash_search_24_8 (mapping_hash, &kv6, &value))
725  {
726  return 1;
727  }
728 
729  *index = value.value;
730  return 0;
731 }
732 
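/*
 * SNAT in2out path: the packet's source (address, port, protocol, fib) tuple
 * is matched against the static mappings, the source address and port are
 * rewritten to the mapping's src_ip/src_port (the VIP side), and the IP and
 * TCP/UDP checksums are patched incrementally with
 * ip_csum_sub_even()/ip_csum_add_even() rather than recomputed in full.
 */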
733 static uword
734 lb_nat_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
735  vlib_frame_t * frame, u32 is_nat4)
736 {
737  u32 n_left_from, *from, *to_next;
738  u32 next_index;
739  u32 pkts_processed = 0;
740  lb_main_t *lbm = &lb_main;
741  u32 stats_node_index;
742 
743  stats_node_index =
744  is_nat4 ? lb_nat4_in2out_node.index : lb_nat6_in2out_node.index;
745 
746  from = vlib_frame_vector_args (frame);
747  n_left_from = frame->n_vectors;
748  next_index = node->cached_next_index;
749 
750  while (n_left_from > 0)
751  {
752  u32 n_left_to_next;
753 
754  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
755 
756  while (n_left_from > 0 && n_left_to_next > 0)
757  {
758  u32 bi0;
759  vlib_buffer_t * b0;
760  u32 next0;
761  u32 sw_if_index0;
762  ip_csum_t csum;
763  u16 old_port0, new_port0;
764  udp_header_t * udp0;
765  tcp_header_t * tcp0;
766 
767  u32 proto0;
768  u32 rx_fib_index0;
769 
770  /* speculatively enqueue b0 to the current next frame */
771  bi0 = from[0];
772  to_next[0] = bi0;
773  from += 1;
774  to_next += 1;
775  n_left_from -= 1;
776  n_left_to_next -= 1;
777 
778  b0 = vlib_get_buffer (vm, bi0);
779  next0 = LB_NAT4_IN2OUT_NEXT_LOOKUP;
780  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
781  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (
782  sw_if_index0);
783 
784  if (is_nat4)
785  {
786  ip4_header_t * ip40;
787  u32 old_addr0, new_addr0;
788  lb_snat4_key_t key40;
789  lb_snat_mapping_t *sm40;
790  u32 index40;
791 
792  ip40 = vlib_buffer_get_current (b0);
793  udp0 = ip4_next_header (ip40);
794  tcp0 = (tcp_header_t *) udp0;
795  proto0 = lb_ip_proto_to_nat_proto (ip40->protocol);
796 
797  key40.addr = ip40->src_address;
798  key40.protocol = proto0;
799  key40.port = udp0->src_port;
800  key40.fib_index = rx_fib_index0;
801 
802  if (lb_nat44_mapping_match (lbm, &key40, &index40))
803  {
804  next0 = LB_NAT4_IN2OUT_NEXT_DROP;
805  goto trace0;
806  }
807 
808  sm40 = pool_elt_at_index(lbm->snat_mappings, index40);
809  new_addr0 = sm40->src_ip.ip4.as_u32;
810  new_port0 = sm40->src_port;
811  vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm40->fib_index;
812  old_addr0 = ip40->src_address.as_u32;
813  ip40->src_address.as_u32 = new_addr0;
814 
815  csum = ip40->checksum;
816  csum = ip_csum_sub_even (csum, old_addr0);
817  csum = ip_csum_add_even (csum, new_addr0);
818  ip40->checksum = ip_csum_fold (csum);
819 
820  if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
821  {
822  old_port0 = tcp0->src_port;
823  tcp0->src_port = new_port0;
824 
825  csum = tcp0->checksum;
826  csum = ip_csum_sub_even (csum, old_addr0);
827  csum = ip_csum_sub_even (csum, old_port0);
828  csum = ip_csum_add_even (csum, new_addr0);
829  csum = ip_csum_add_even (csum, new_port0);
830  tcp0->checksum = ip_csum_fold (csum);
831  }
832  else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
833  {
834  old_port0 = udp0->src_port;
835  udp0->src_port = new_port0;
836 
837  csum = udp0->checksum;
838  csum = ip_csum_sub_even (csum, old_addr0);
839  csum = ip_csum_sub_even (csum, old_port0);
840  csum = ip_csum_add_even (csum, new_addr0);
841  csum = ip_csum_add_even (csum, new_port0);
842  udp0->checksum = ip_csum_fold (csum);
843  }
844 
845  pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
846  }
847  else
848  {
849  ip6_header_t * ip60;
850  ip6_address_t old_addr0, new_addr0;
851  lb_snat6_key_t key60;
852  lb_snat_mapping_t *sm60;
853  u32 index60;
854 
855  ip60 = vlib_buffer_get_current (b0);
856  udp0 = ip6_next_header (ip60);
857  tcp0 = (tcp_header_t *) udp0;
858  proto0 = lb_ip_proto_to_nat_proto (ip60->protocol);
859 
860  key60.addr.as_u64[0] = ip60->src_address.as_u64[0];
861  key60.addr.as_u64[1] = ip60->src_address.as_u64[1];
862  key60.protocol = proto0;
863  key60.port = udp0->src_port;
864  key60.fib_index = rx_fib_index0;
865 
866  if (lb_nat66_mapping_match (lbm, &key60, &index60))
867  {
868  next0 = LB_NAT6_IN2OUT_NEXT_DROP;
869  goto trace0;
870  }
871 
872  sm60 = pool_elt_at_index(lbm->snat_mappings, index60);
873  new_addr0.as_u64[0] = sm60->src_ip.as_u64[0];
874  new_addr0.as_u64[1] = sm60->src_ip.as_u64[1];
875  new_port0 = sm60->src_port;
876  vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm60->fib_index;
877  old_addr0.as_u64[0] = ip60->src_address.as_u64[0];
878  old_addr0.as_u64[1] = ip60->src_address.as_u64[1];
879  ip60->src_address.as_u64[0] = new_addr0.as_u64[0];
880  ip60->src_address.as_u64[1] = new_addr0.as_u64[1];
881 
882  if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
883  {
884  old_port0 = tcp0->src_port;
885  tcp0->src_port = new_port0;
886 
887  csum = tcp0->checksum;
888  csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
889  csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
890  csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
891  csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
892  csum = ip_csum_sub_even (csum, old_port0);
893  csum = ip_csum_add_even (csum, new_port0);
894  tcp0->checksum = ip_csum_fold (csum);
895  }
896  else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
897  {
898  old_port0 = udp0->src_port;
899  udp0->src_port = new_port0;
900 
901  csum = udp0->checksum;
902  csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
903  csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
904  csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
905  csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
906  csum = ip_csum_sub_even (csum, old_port0);
907  csum = ip_csum_add_even (csum, new_port0);
908  udp0->checksum = ip_csum_fold (csum);
909  }
910 
911  pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
912  }
913 
914  trace0: if (PREDICT_FALSE(
915  (node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED)))
916  {
917  lb_nat_trace_t *t = vlib_add_trace (vm, node, b0, sizeof(*t));
918  t->rx_sw_if_index = sw_if_index0;
919  t->next_index = next0;
920  }
921 
922  /* verify speculative enqueue, maybe switch current next frame */
923  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
924  n_left_to_next, bi0, next0);
925  }
926 
927  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
928  }
929 
930  vlib_node_increment_counter (vm, stats_node_index,
931  LB_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
932  pkts_processed);
933  return frame->n_vectors;
934 }
935 
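/*
 * Per-node entry points: each graph node below is a thin wrapper that
 * instantiates lb_node_fn() with constant arguments (address family,
 * encapsulation, per-port flag), letting the compiler specialize each
 * variant and drop the unused branches.
 */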
936 static uword
937 lb6_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
938  vlib_frame_t * frame)
939 {
940  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 0);
941 }
942 
943 static uword
944 lb6_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
945  vlib_frame_t * frame)
946 {
947  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 0);
948 }
949 
950 static uword
951 lb4_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
952  vlib_frame_t * frame)
953 {
954  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 0);
955 }
956 
957 static uword
958 lb4_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
959  vlib_frame_t * frame)
960 {
961  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 0);
962 }
963 
964 static uword
965 lb6_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
966  vlib_frame_t * frame)
967 {
968  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 1);
969 }
970 
971 static uword
972 lb6_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
973  vlib_frame_t * frame)
974 {
975  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 1);
976 }
977 
978 static uword
979 lb4_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
980  vlib_frame_t * frame)
981 {
982  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 1);
983 }
984 
985 static uword
986 lb4_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
987  vlib_frame_t * frame)
988 {
989  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 1);
990 }
991 
992 static uword
993 lb4_l3dsr_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
994  vlib_frame_t * frame)
995 {
996  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 0);
997 }
998 
999 static uword
1000 lb4_l3dsr_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1001  vlib_frame_t * frame)
1002 {
1003  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 1);
1004 }
1005 
1006 static uword
1007 lb6_nat6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1008  vlib_frame_t * frame)
1009 {
1010  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_NAT6, 1);
1011 }
1012 
1013 static uword
1014 lb4_nat4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1015  vlib_frame_t * frame)
1016 {
1017  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_NAT4, 1);
1018 }
1019 
1020 static uword
1021 lb_nat4_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1022  vlib_frame_t * frame)
1023 {
1024  return lb_nat_in2out_node_fn (vm, node, frame, 1);
1025 }
1026 
1027 static uword
1028 lb_nat6_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1029  vlib_frame_t * frame)
1030 {
1031  return lb_nat_in2out_node_fn (vm, node, frame, 0);
1032 }
1033 
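/*
 * Node registrations: one VLIB graph node per (address family, encap) pair,
 * all sharing the lb error strings and a single "error-drop" next node;
 * traced packets are formatted with format_lb_trace.
 */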
1034 VLIB_REGISTER_NODE (lb6_gre6_node) =
1035  {
1036  .function = lb6_gre6_node_fn,
1037  .name = "lb6-gre6",
1038  .vector_size = sizeof(u32),
1039  .format_trace = format_lb_trace,
1040  .n_errors = LB_N_ERROR,
1041  .error_strings = lb_error_strings,
1042  .n_next_nodes = LB_N_NEXT,
1043  .next_nodes =
1044  { [LB_NEXT_DROP] = "error-drop" },
1045  };
1046 
1047 VLIB_REGISTER_NODE (lb6_gre4_node) =
1048  {
1049  .function = lb6_gre4_node_fn,
1050  .name = "lb6-gre4",
1051  .vector_size = sizeof(u32),
1052  .format_trace = format_lb_trace,
1053  .n_errors = LB_N_ERROR,
1054  .error_strings = lb_error_strings,
1055  .n_next_nodes = LB_N_NEXT,
1056  .next_nodes =
1057  { [LB_NEXT_DROP] = "error-drop" },
1058  };
1059 
1060 VLIB_REGISTER_NODE (lb4_gre6_node) =
1061  {
1062  .function = lb4_gre6_node_fn,
1063  .name = "lb4-gre6",
1064  .vector_size = sizeof(u32),
1065  .format_trace = format_lb_trace,
1066  .n_errors = LB_N_ERROR,
1067  .error_strings = lb_error_strings,
1068  .n_next_nodes = LB_N_NEXT,
1069  .next_nodes =
1070  { [LB_NEXT_DROP] = "error-drop" },
1071  };
1072 
1073 VLIB_REGISTER_NODE (lb4_gre4_node) =
1074  {
1075  .function = lb4_gre4_node_fn,
1076  .name = "lb4-gre4",
1077  .vector_size = sizeof(u32),
1078  .format_trace = format_lb_trace,
1079  .n_errors = LB_N_ERROR,
1080  .error_strings = lb_error_strings,
1081  .n_next_nodes = LB_N_NEXT,
1082  .next_nodes =
1083  { [LB_NEXT_DROP] = "error-drop" },
1084  };
1085 
1086 VLIB_REGISTER_NODE (lb6_gre6_port_node) =
1087  {
1088  .function = lb6_gre6_port_node_fn,
1089  .name = "lb6-gre6-port",
1090  .vector_size = sizeof(u32),
1091  .format_trace = format_lb_trace,
1092  .n_errors = LB_N_ERROR,
1093  .error_strings = lb_error_strings,
1094  .n_next_nodes = LB_N_NEXT,
1095  .next_nodes =
1096  { [LB_NEXT_DROP] = "error-drop" },
1097  };
1098 
1099 VLIB_REGISTER_NODE (lb6_gre4_port_node) =
1100  {
1101  .function = lb6_gre4_port_node_fn,
1102  .name = "lb6-gre4-port",
1103  .vector_size = sizeof(u32),
1104  .format_trace = format_lb_trace,
1105  .n_errors = LB_N_ERROR,
1106  .error_strings = lb_error_strings,
1107  .n_next_nodes = LB_N_NEXT,
1108  .next_nodes =
1109  { [LB_NEXT_DROP] = "error-drop" },
1110  };
1111 
1112 VLIB_REGISTER_NODE (lb4_gre6_port_node) =
1113  {
1114  .function = lb4_gre6_port_node_fn,
1115  .name = "lb4-gre6-port",
1116  .vector_size = sizeof(u32),
1117  .format_trace = format_lb_trace,
1118  .n_errors = LB_N_ERROR,
1119  .error_strings = lb_error_strings,
1120  .n_next_nodes = LB_N_NEXT,
1121  .next_nodes =
1122  { [LB_NEXT_DROP] = "error-drop" },
1123  };
1124 
1125 VLIB_REGISTER_NODE (lb4_gre4_port_node) =
1126  {
1127  .function = lb4_gre4_port_node_fn,
1128  .name = "lb4-gre4-port",
1129  .vector_size = sizeof(u32),
1130  .format_trace = format_lb_trace,
1131  .n_errors = LB_N_ERROR,
1132  .error_strings = lb_error_strings,
1133  .n_next_nodes = LB_N_NEXT,
1134  .next_nodes =
1135  { [LB_NEXT_DROP] = "error-drop" },
1136  };
1137 
1138 VLIB_REGISTER_NODE (lb4_l3dsr_port_node) =
1139  {
1140  .function = lb4_l3dsr_port_node_fn,
1141  .name = "lb4-l3dsr-port",
1142  .vector_size = sizeof(u32),
1143  .format_trace = format_lb_trace,
1144  .n_errors = LB_N_ERROR,
1145  .error_strings = lb_error_strings,
1146  .n_next_nodes = LB_N_NEXT,
1147  .next_nodes =
1148  { [LB_NEXT_DROP] = "error-drop" },
1149  };
1150 
1151 VLIB_REGISTER_NODE (lb4_l3dsr_node) =
1152  {
1153  .function = lb4_l3dsr_node_fn,
1154  .name = "lb4-l3dsr",
1155  .vector_size = sizeof(u32),
1156  .format_trace = format_lb_trace,
1157  .n_errors = LB_N_ERROR,
1158  .error_strings = lb_error_strings,
1159  .n_next_nodes = LB_N_NEXT,
1160  .next_nodes =
1161  { [LB_NEXT_DROP] = "error-drop" },
1162  };
1163 
1164 VLIB_REGISTER_NODE (lb6_nat6_port_node) =
1165  {
1166  .function = lb6_nat6_port_node_fn,
1167  .name = "lb6-nat6-port",
1168  .vector_size = sizeof(u32),
1169  .format_trace = format_lb_trace,
1170  .n_errors = LB_N_ERROR,
1171  .error_strings = lb_error_strings,
1172  .n_next_nodes = LB_N_NEXT,
1173  .next_nodes =
1174  { [LB_NEXT_DROP] = "error-drop" },
1175  };
1176 
1177 VLIB_REGISTER_NODE (lb4_nat4_port_node) =
1178  {
1179  .function = lb4_nat4_port_node_fn,
1180  .name = "lb4-nat4-port",
1181  .vector_size = sizeof(u32),
1182  .format_trace = format_lb_trace,
1183  .n_errors = LB_N_ERROR,
1184  .error_strings = lb_error_strings,
1185  .n_next_nodes = LB_N_NEXT,
1186  .next_nodes =
1187  { [LB_NEXT_DROP] = "error-drop" },
1188  };
1189 
1190 static uword
1191 lb4_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1192  vlib_frame_t * frame)
1193 {
1194  return lb_nodeport_node_fn (vm, node, frame, 1);
1195 }
1196 
1197 static uword
1198 lb6_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1199  vlib_frame_t * frame)
1200 {
1201  return lb_nodeport_node_fn (vm, node, frame, 0);
1202 }
1203 
1204 VLIB_REGISTER_NODE (lb4_nodeport_node) =
1205  {
1206  .function = lb4_nodeport_node_fn,
1207  .name = "lb4-nodeport",
1208  .vector_size = sizeof(u32),
1209  .format_trace = format_nodeport_lb_trace,
1210  .n_errors = LB_N_ERROR,
1211  .error_strings = lb_error_strings,
1212  .n_next_nodes = LB4_NODEPORT_N_NEXT,
1213  .next_nodes =
1214  {
1215  [LB4_NODEPORT_NEXT_IP4_NAT4] = "lb4-nat4-port",
1216  [LB4_NODEPORT_NEXT_DROP] = "error-drop",
1217  },
1218  };
1219 
1220 VLIB_REGISTER_NODE (lb6_nodeport_node) =
1221  {
1222  .function = lb6_nodeport_node_fn,
1223  .name = "lb6-nodeport",
1224  .vector_size = sizeof(u32),
1225  .format_trace = format_nodeport_lb_trace,
1226  .n_errors = LB_N_ERROR,
1227  .error_strings = lb_error_strings,
1228  .n_next_nodes = LB6_NODEPORT_N_NEXT,
1229  .next_nodes =
1230  {
1231  [LB6_NODEPORT_NEXT_IP6_NAT6] = "lb6-nat6-port",
1232  [LB6_NODEPORT_NEXT_DROP] = "error-drop",
1233  },
1234  };
1235 
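/*
 * The NAT in2out nodes are enabled as ip4-unicast/ip6-unicast features so
 * they run before ip4-lookup/ip6-lookup on interfaces where the feature is
 * turned on.
 */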
1236 VNET_FEATURE_INIT (lb_nat4_in2out_node_fn, static) =
1237  {
1238  .arc_name = "ip4-unicast",
1239  .node_name = "lb-nat4-in2out",
1240  .runs_before = VNET_FEATURES("ip4-lookup"),
1241  };
1242 
1243 VLIB_REGISTER_NODE (lb_nat4_in2out_node) =
1244  {
1245  .function = lb_nat4_in2out_node_fn,
1246  .name = "lb-nat4-in2out",
1247  .vector_size = sizeof(u32),
1248  .format_trace = format_lb_nat_trace,
1249  .n_errors = LB_N_ERROR,
1250  .error_strings = lb_error_strings,
1251  .n_next_nodes = LB_NAT4_IN2OUT_N_NEXT,
1252  .next_nodes =
1253  {
1254  [LB_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
1255  [LB_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
1256  },
1257  };
1258 
1259 VNET_FEATURE_INIT (lb_nat6_in2out_node_fn, static) =
1260  {
1261  .arc_name = "ip6-unicast",
1262  .node_name = "lb-nat6-in2out",
1263  .runs_before = VNET_FEATURES("ip6-lookup"),
1264  };
1265 
1266 VLIB_REGISTER_NODE (lb_nat6_in2out_node) =
1267  {
1268  .function = lb_nat6_in2out_node_fn,
1269  .name = "lb-nat6-in2out",
1270  .vector_size = sizeof(u32),
1271  .format_trace = format_lb_nat_trace,
1272  .n_errors = LB_N_ERROR,
1273  .error_strings = lb_error_strings,
1274  .n_next_nodes = LB_NAT6_IN2OUT_N_NEXT,
1275  .next_nodes =
1276  {
1277  [LB_NAT6_IN2OUT_NEXT_DROP] = "error-drop",
1278  [LB_NAT6_IN2OUT_NEXT_LOOKUP] = "ip6-lookup",
1279  },
1280  };
1281 