FD.io VPP  v21.01
Vector Packet Processing
gtpu_decap.c
/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <gtpu/gtpu.h>

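/*
 * This file implements three GTP-U decap paths:
 *  - gtpu4-input / gtpu6-input: decapsulate packets handed over by
 *    udp-local, keyed on (tunnel source IP, TEID);
 *  - ip4-gtpu-bypass / ip6-gtpu-bypass: an IP feature that short-circuits
 *    well-formed GTPU/UDP packets straight to gtpu-input;
 *  - gtpu4-flow-input: decapsulate packets pre-classified by hardware flow
 *    offload, keyed on the buffer flow_id.
 */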
vlib_node_registration_t gtpu4_input_node;
vlib_node_registration_t gtpu6_input_node;

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}
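/* A tunnel only matches for decap when the packet arrived in the same FIB
 * (VRF) the tunnel encapsulates into; otherwise a matching (SIP, TEID) key
 * in a different VRF could be accepted by mistake. */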

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

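  /* Note: last_key4 / last_key6 above cache the most recent
   * (SIP, TEID) -> tunnel hash lookup; initializing them to all-ones marks
   * the cache invalid, and a run of packets from one tunnel can then skip
   * the hash table entirely. */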
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

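          /* A GTPU v1 header is 8 bytes of mandatory fields; when any of
           * the E, S or PN flags is set, a 4-byte optional block (sequence
           * number, N-PDU number, next extension type) follows.
           * sizeof(gtpu_header_t) includes that block, so subtract 4 when
           * no flag is set. */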
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

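          /* The combined RX counter is flushed only when the tunnel
           * sw_if_index changes (and once more after the loop), so a burst
           * of packets on one tunnel costs a single counter update. */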
        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

475 
476  trace1:
477  b1->error = error1 ? node->errors[error1] : 0;
478 
479  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
480  {
481  gtpu_rx_trace_t *tr
482  = vlib_add_trace (vm, node, b1, sizeof (*tr));
483  tr->next_index = next1;
484  tr->error = error1;
485  tr->tunnel_index = tunnel_index1;
486  tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
487  }
488 
489  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
490  to_next, n_left_to_next,
491  bi0, bi1, next0, next1);
492  }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          } else {
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID:
             * the SIP identifies a GTPU path, the TEID a tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
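/* The last stats sw_if_index is parked in node->runtime_data[0] so the
 * batching cache survives across frames: the next frame starts with the
 * tunnel that was active when this one ended. */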

VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}

static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_vxan_bypass_next_t;

always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;   /* last IPv4 address / fib index
                               matching a local VTEP address */
  vtep6_key_t last_vtep6;   /* last IPv6 address / fib index
                               matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
#endif

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);

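  /* This node runs as an IP input feature: a packet that is UDP, addressed
   * to the GTPU port and to a local VTEP is handed straight to gtpu-input;
   * everything else continues down the feature arc via vnet_feature_next(). */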
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                    (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

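          /* Note: a zero UDP checksum is treated as "not computed" and
           * accepted here for both IPv4 and IPv6; otherwise the checksum is
           * either trusted from a prior compute (per the buffer flags) or
           * validated in software before the packet is diverted. */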
        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                    (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                    (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
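/* The bypass node never terminates packets itself: it only re-steers GTPU
 * candidates to gtpu4-input or drops malformed ones; it is attached per
 * interface on the IP unicast feature arc (see the gtpu plugin CLI). */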

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */

VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);

#define foreach_gtpu_flow_error                       \
  _(NONE, "no error")                                 \
  _(PAYLOAD_ERROR, "Payload type errors")             \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")       \
  _(IP_HEADER_ERROR, "Rx ip header errors")           \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")     \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _

};

#define gtpu_local_need_csum_check(_b)                  \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED     \
     || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))

#define gtpu_local_csum_is_valid(_b)                    \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT       \
    || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)

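/* These macros consult the buffer flags first: if the UDP checksum was
 * already computed, or will be completed by hardware offload, there is no
 * need to run the software checksum over the payload again. */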
static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t)};

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      /* current_data is at the GTPU header; rewind to the IP header for
         checksum validation, then restore */
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}

static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0 || ip4_hdr->ip_version_and_header_length != 0x45;
}

static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}

static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
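/* When several checks fail at once, the later assignments win, so the
 * reported error ranks checksum over UDP length over IP header. */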


always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

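  /* Packets reach this node via hardware flow offload: the NIC has already
   * classified the packet to a tunnel and stamped b->flow_id, so the tunnel
   * is recovered as (flow_id - gtm->flow_id_start) with no hash lookup.
   * Only the IP/UDP sanity and checksum checks remain in software. */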
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
          b1->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }
          next1 = t1->decap_next_index;

          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in current implementation as only
             ipv4/udp/gtpu/IPV4 type packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input(vm, node, from_frame);
}


/* *INDENT-OFF* */
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _

  },
};
#endif
/* *INDENT-ON* */

#endif /* CLIB_MARCH_VARIANT */