FD.io VPP  v20.05-21-gb1500e9ff
Vector Packet Processing
gtpu_decap.c
1 /*
2  * decap.c: gtpu tunnel decap packet processing
3  *
4  * Copyright (c) 2017 Intel and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <gtpu/gtpu.h>
21 
22 vlib_node_registration_t gtpu4_input_node;
23 vlib_node_registration_t gtpu6_input_node;
24 
25 typedef struct {
26   u32 next_index;
27   u32 tunnel_index;
28   u32 error;
29   u32 teid;
30 } gtpu_rx_trace_t;
31 
32 static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
33 {
34  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
35  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
36  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);
37 
38  if (t->tunnel_index != ~0)
39  {
40  s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
41  t->tunnel_index, t->teid, t->next_index, t->error);
42  }
43  else
44  {
45  s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
46  t->teid);
47  }
48  return s;
49 }
50 
51 static u32
52 validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
53 {
54  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
55 }
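/*
 * A TEID/SIP match alone is not sufficient: the packet must also have
 * been received in the FIB (VRF) the tunnel encapsulates into, which
 * keeps identical TEIDs in different VRFs isolated from each other.
 */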
56 
57 always_inline uword
58 gtpu_input (vlib_main_t * vm,
59   vlib_node_runtime_t * node,
60   vlib_frame_t * from_frame,
61  u32 is_ip4)
62 {
63  u32 n_left_from, next_index, * from, * to_next;
64  gtpu_main_t * gtm = &gtpu_main;
65  vnet_main_t * vnm = gtm->vnet_main;
66  vnet_interface_main_t * im = &vnm->interface_main;
67  u32 last_tunnel_index = ~0;
68  gtpu4_tunnel_key_t last_key4;
69  gtpu6_tunnel_key_t last_key6;
70  u32 pkts_decapsulated = 0;
71  u32 thread_index = vlib_get_thread_index();
72  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
73 
74  if (is_ip4)
75  last_key4.as_u64 = ~0;
76  else
77  clib_memset (&last_key6, 0xff, sizeof (last_key6));
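/*
 * last_key4/last_key6 cache the most recent (SIP, TEID) -> tunnel lookup
 * so consecutive packets of the same flow skip the hash table; the
 * all-ones pattern is a sentinel no real key can match, forcing a real
 * lookup on the first packet.
 */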
78 
79  from = vlib_frame_vector_args (from_frame);
80  n_left_from = from_frame->n_vectors;
81 
82  next_index = node->cached_next_index;
83  stats_sw_if_index = node->runtime_data[0];
84  stats_n_packets = stats_n_bytes = 0;
85 
86  while (n_left_from > 0)
87  {
88  u32 n_left_to_next;
89 
90  vlib_get_next_frame (vm, node, next_index,
91  to_next, n_left_to_next);
92  while (n_left_from >= 4 && n_left_to_next >= 2)
93  {
94  u32 bi0, bi1;
95  vlib_buffer_t * b0, * b1;
96  u32 next0, next1;
97  ip4_header_t * ip4_0, * ip4_1;
98  ip6_header_t * ip6_0, * ip6_1;
99  gtpu_header_t * gtpu0, * gtpu1;
100  u32 gtpu_hdr_len0, gtpu_hdr_len1;
101  uword * p0, * p1;
102  u32 tunnel_index0, tunnel_index1;
103  gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
104  gtpu4_tunnel_key_t key4_0, key4_1;
105  gtpu6_tunnel_key_t key6_0, key6_1;
106  u32 error0, error1;
107  u32 sw_if_index0, sw_if_index1, len0, len1;
108  u8 has_space0, has_space1;
109  u8 ver0, ver1;
110 
111  /* Prefetch next iteration. */
112  {
113  vlib_buffer_t * p2, * p3;
114 
115  p2 = vlib_get_buffer (vm, from[2]);
116  p3 = vlib_get_buffer (vm, from[3]);
117 
118  vlib_prefetch_buffer_header (p2, LOAD);
119  vlib_prefetch_buffer_header (p3, LOAD);
120 
121  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
122  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
123  }
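/*
 * Standard VPP dual-loop: while packets 0 and 1 are being processed, the
 * buffer headers and data of the next pair (from[2], from[3]) are
 * prefetched so they are already in cache on the next iteration.
 */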
124 
125  bi0 = from[0];
126  bi1 = from[1];
127  to_next[0] = bi0;
128  to_next[1] = bi1;
129  from += 2;
130  to_next += 2;
131  n_left_to_next -= 2;
132  n_left_from -= 2;
133 
134  b0 = vlib_get_buffer (vm, bi0);
135  b1 = vlib_get_buffer (vm, bi1);
136 
137  /* udp leaves current_data pointing at the gtpu header */
138  gtpu0 = vlib_buffer_get_current (b0);
139  gtpu1 = vlib_buffer_get_current (b1);
140  if (is_ip4)
141  {
142  ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
143  ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
144  }
145  else
146  {
147  ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
148  ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
149  }
150 
151  tunnel_index0 = ~0;
152  error0 = 0;
153 
154  tunnel_index1 = ~0;
155  error1 = 0;
156 
157  /* speculatively load gtp header version field */
158  ver0 = gtpu0->ver_flags;
159  ver1 = gtpu1->ver_flags;
160 
161  /*
162  * Manipulate gtpu header
163  * TBD: Manipulate Sequence Number and N-PDU Number
164  * TBD: Manipulate Next Extension Header
165  */
166  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
167  gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
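/*
 * Worked example: gtpu_header_t is 12 bytes, i.e. the 8-byte mandatory
 * header plus the optional 4-byte sequence / N-PDU / next-extension
 * block.  If none of the E, S, PN flag bits is set (ver_flags = 0x30),
 * the optional block is absent and the header is 12 - 4 = 8 bytes; with
 * any of those bits set (e.g. ver_flags = 0x32, S bit) all 12 bytes are
 * consumed.
 */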
168 
169  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
170  has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
171 
172  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
173  {
174  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
175  next0 = GTPU_INPUT_NEXT_DROP;
176  goto trace0;
177  }
178 
179  /* Manipulate packet 0 */
180  if (is_ip4) {
181  key4_0.src = ip4_0->src_address.as_u32;
182  key4_0.teid = gtpu0->teid;
183 
184  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
185  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
186  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
187  {
188  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
189  if (PREDICT_FALSE (p0 == NULL))
190  {
191  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
192  next0 = GTPU_INPUT_NEXT_DROP;
193  goto trace0;
194  }
195  last_key4.as_u64 = key4_0.as_u64;
196  tunnel_index0 = last_tunnel_index = p0[0];
197  }
198  else
199  tunnel_index0 = last_tunnel_index;
200  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
201 
202  /* Validate GTPU tunnel encap-fib index against packet */
203  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
204  {
205  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
206  next0 = GTPU_INPUT_NEXT_DROP;
207  goto trace0;
208  }
209 
210  /* Validate GTPU tunnel SIP against packet DIP */
211  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
212  goto next0; /* valid packet */
213  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
214  {
215  key4_0.src = ip4_0->dst_address.as_u32;
216  key4_0.teid = gtpu0->teid;
217  /* Make sure a mcast GTPU tunnel exists for this packet's DIP and TEID */
218  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
219  if (PREDICT_TRUE (p0 != NULL))
220  {
221  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
222  goto next0; /* valid packet */
223  }
224  }
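/*
 * Reaching this point means the DIP matched neither the tunnel's own SIP
 * (unicast case) nor any multicast tunnel keyed by (DIP, TEID), so the
 * packet belongs to no known tunnel and is dropped.
 */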
225  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
226  next0 = GTPU_INPUT_NEXT_DROP;
227  goto trace0;
228 
229  } else /* !is_ip4 */ {
230  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
231  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
232  key6_0.teid = gtpu0->teid;
233 
234  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
235  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
236  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
237  {
238  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
239  if (PREDICT_FALSE (p0 == NULL))
240  {
241  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
242  next0 = GTPU_INPUT_NEXT_DROP;
243  goto trace0;
244  }
245  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
246  tunnel_index0 = last_tunnel_index = p0[0];
247  }
248  else
249  tunnel_index0 = last_tunnel_index;
250  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
251 
252  /* Validate GTPU tunnel encap-fib index against packet */
253  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
254  {
255  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
256  next0 = GTPU_INPUT_NEXT_DROP;
257  goto trace0;
258  }
259 
260  /* Validate GTPU tunnel SIP against packet DIP */
261  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
262  &t0->src.ip6)))
263  goto next0; /* valid packet */
264  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
265  {
266  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
267  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
268  key6_0.teid = gtpu0->teid;
269  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
270  if (PREDICT_TRUE (p0 != NULL))
271  {
272  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
273  goto next0; /* valid packet */
274  }
275  }
276  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
277  next0 = GTPU_INPUT_NEXT_DROP;
278  goto trace0;
279  }
280 
281  next0:
282  /* Pop gtpu header */
283  vlib_buffer_advance (b0, gtpu_hdr_len0);
284 
285  next0 = t0->decap_next_index;
286  sw_if_index0 = t0->sw_if_index;
287  len0 = vlib_buffer_length_in_chain (vm, b0);
288 
289  /* Required to make the l2 tag push / pop code work on l2 subifs */
290  if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
291  vnet_update_l2_len (b0);
292 
293  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
294  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
295  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
296 
297  pkts_decapsulated ++;
298  stats_n_packets += 1;
299  stats_n_bytes += len0;
300 
301  /* Batch stats increment on the same gtpu tunnel so counter
302  is not incremented per packet */
303  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
304  {
305  stats_n_packets -= 1;
306  stats_n_bytes -= len0;
307  if (stats_n_packets)
308  vlib_increment_combined_counter
309  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
310  thread_index, stats_sw_if_index,
311  stats_n_packets, stats_n_bytes);
312  stats_n_packets = 1;
313  stats_n_bytes = len0;
314  stats_sw_if_index = sw_if_index0;
315  }
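/*
 * The counters are flushed only when the packet's tunnel differs from the
 * previous one, so a run of packets on one tunnel costs a single
 * vlib_increment_combined_counter call rather than one update per packet.
 */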
316 
317  trace0:
318  b0->error = error0 ? node->errors[error0] : 0;
319 
320  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
321  {
322  gtpu_rx_trace_t *tr
323  = vlib_add_trace (vm, node, b0, sizeof (*tr));
324  tr->next_index = next0;
325  tr->error = error0;
326  tr->tunnel_index = tunnel_index0;
327  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
328  }
329 
330  if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
331  {
332  error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
333  next1 = GTPU_INPUT_NEXT_DROP;
334  goto trace1;
335  }
336 
337  /* Manipulate packet 1 */
338  if (is_ip4) {
339  key4_1.src = ip4_1->src_address.as_u32;
340  key4_1.teid = gtpu1->teid;
341 
342  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
343  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
344  if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
345  {
346  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
347  if (PREDICT_FALSE (p1 == NULL))
348  {
349  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
350  next1 = GTPU_INPUT_NEXT_DROP;
351  goto trace1;
352  }
353  last_key4.as_u64 = key4_1.as_u64;
354  tunnel_index1 = last_tunnel_index = p1[0];
355  }
356  else
357  tunnel_index1 = last_tunnel_index;
358  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
359 
360  /* Validate GTPU tunnel encap-fib index against packet */
361  if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
362  {
363  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
364  next1 = GTPU_INPUT_NEXT_DROP;
365  goto trace1;
366  }
367 
368  /* Validate GTPU tunnel SIP against packet DIP */
369  if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
370  goto next1; /* valid packet */
371  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
372  {
373  key4_1.src = ip4_1->dst_address.as_u32;
374  key4_1.teid = gtpu1->teid;
375  /* Make sure a mcast GTPU tunnel exists for this packet's DIP and TEID */
376  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
377  if (PREDICT_TRUE (p1 != NULL))
378  {
379  mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
380  goto next1; /* valid packet */
381  }
382  }
383  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
384  next1 = GTPU_INPUT_NEXT_DROP;
385  goto trace1;
386 
387  } else /* !is_ip4 */ {
388  key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
389  key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
390  key6_1.teid = gtpu1->teid;
391 
392  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
393  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
394  if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
395  {
396  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
397 
398  if (PREDICT_FALSE (p1 == NULL))
399  {
400  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
401  next1 = GTPU_INPUT_NEXT_DROP;
402  goto trace1;
403  }
404 
405  clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
406  tunnel_index1 = last_tunnel_index = p1[0];
407  }
408  else
409  tunnel_index1 = last_tunnel_index;
410  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
411 
412  /* Validate GTPU tunnel encap-fib index against packet */
413  if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
414  {
415  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
416  next1 = GTPU_INPUT_NEXT_DROP;
417  goto trace1;
418  }
419 
420  /* Validate GTPU tunnel SIP against packet DIP */
421  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
422  &t1->src.ip6)))
423  goto next1; /* valid packet */
424  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
425  {
426  key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
427  key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
428  key6_1.teid = gtpu1->teid;
429  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
430  if (PREDICT_TRUE (p1 != NULL))
431  {
432  mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
433  goto next1; /* valid packet */
434  }
435  }
436  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
437  next1 = GTPU_INPUT_NEXT_DROP;
438  goto trace1;
439  }
440 
441  next1:
442  /* Pop gtpu header */
443  vlib_buffer_advance (b1, gtpu_hdr_len1);
444 
445  next1 = t1->decap_next_index;
446  sw_if_index1 = t1->sw_if_index;
447  len1 = vlib_buffer_length_in_chain (vm, b1);
448 
449  /* Required to make the l2 tag push / pop code work on l2 subifs */
450  if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
451  vnet_update_l2_len (b1);
452 
453  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
454  vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
455  sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;
456 
457  pkts_decapsulated ++;
458  stats_n_packets += 1;
459  stats_n_bytes += len1;
460 
461  /* Batch stats increment on the same gtpu tunnel so counter
462  is not incremented per packet */
463  if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
464  {
465  stats_n_packets -= 1;
466  stats_n_bytes -= len1;
467  if (stats_n_packets)
468  vlib_increment_combined_counter
469  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
470  thread_index, stats_sw_if_index,
471  stats_n_packets, stats_n_bytes);
472  stats_n_packets = 1;
473  stats_n_bytes = len1;
474  stats_sw_if_index = sw_if_index1;
475  }
476 
477  trace1:
478  b1->error = error1 ? node->errors[error1] : 0;
479 
480  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
481  {
482  gtpu_rx_trace_t *tr
483  = vlib_add_trace (vm, node, b1, sizeof (*tr));
484  tr->next_index = next1;
485  tr->error = error1;
486  tr->tunnel_index = tunnel_index1;
487  tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
488  }
489 
490  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
491  to_next, n_left_to_next,
492  bi0, bi1, next0, next1);
493  }
494 
495  while (n_left_from > 0 && n_left_to_next > 0)
496  {
497  u32 bi0;
498  vlib_buffer_t * b0;
499  u32 next0;
500  ip4_header_t * ip4_0;
501  ip6_header_t * ip6_0;
502  gtpu_header_t * gtpu0;
503  u32 gtpu_hdr_len0;
504  uword * p0;
505  u32 tunnel_index0;
506  gtpu_tunnel_t * t0, * mt0 = NULL;
507  gtpu4_tunnel_key_t key4_0;
508  gtpu6_tunnel_key_t key6_0;
509  u32 error0;
510  u32 sw_if_index0, len0;
511  u8 has_space0;
512  u8 ver0;
513 
514  bi0 = from[0];
515  to_next[0] = bi0;
516  from += 1;
517  to_next += 1;
518  n_left_from -= 1;
519  n_left_to_next -= 1;
520 
521  b0 = vlib_get_buffer (vm, bi0);
522 
523  /* udp leaves current_data pointing at the gtpu header */
524  gtpu0 = vlib_buffer_get_current (b0);
525  if (is_ip4) {
526  ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
527  } else {
528  ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
529  }
530 
531  tunnel_index0 = ~0;
532  error0 = 0;
533 
534  /* speculatively load gtp header version field */
535  ver0 = gtpu0->ver_flags;
536 
537  /*
538  * Manipulate gtpu header
539  * TBD: Manipulate Sequence Number and N-PDU Number
540  * TBD: Manipulate Next Extension Header
541  */
542  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
543 
544  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
545 
546  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
547  {
548  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
549  next0 = GTPU_INPUT_NEXT_DROP;
550  goto trace00;
551  }
552 
553  if (is_ip4) {
554  key4_0.src = ip4_0->src_address.as_u32;
555  key4_0.teid = gtpu0->teid;
556 
557  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
558  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
559  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
560  {
561  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
562  if (PREDICT_FALSE (p0 == NULL))
563  {
564  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
565  next0 = GTPU_INPUT_NEXT_DROP;
566  goto trace00;
567  }
568  last_key4.as_u64 = key4_0.as_u64;
569  tunnel_index0 = last_tunnel_index = p0[0];
570  }
571  else
572  tunnel_index0 = last_tunnel_index;
573  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
574 
575  /* Validate GTPU tunnel encap-fib index against packet */
576  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
577  {
578  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
579  next0 = GTPU_INPUT_NEXT_DROP;
580  goto trace00;
581  }
582 
583  /* Validate GTPU tunnel SIP against packet DIP */
584  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
585  goto next00; /* valid packet */
586  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
587  {
588  key4_0.src = ip4_0->dst_address.as_u32;
589  key4_0.teid = gtpu0->teid;
590  /* Make sure a mcast GTPU tunnel exists for this packet's DIP and TEID */
591  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
592  if (PREDICT_TRUE (p0 != NULL))
593  {
594  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
595  goto next00; /* valid packet */
596  }
597  }
598  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
599  next0 = GTPU_INPUT_NEXT_DROP;
600  goto trace00;
601 
602  } else /* !is_ip4 */ {
603  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
604  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
605  key6_0.teid = gtpu0->teid;
606 
607  /* Make sure a GTPU tunnel exists according to the packet SIP and TEID:
608  * the SIP identifies a GTPU path, and the TEID identifies a tunnel within that path */
609  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
610  {
611  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
612  if (PREDICT_FALSE (p0 == NULL))
613  {
614  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
615  next0 = GTPU_INPUT_NEXT_DROP;
616  goto trace00;
617  }
618  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
619  tunnel_index0 = last_tunnel_index = p0[0];
620  }
621  else
622  tunnel_index0 = last_tunnel_index;
623  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
624 
625  /* Validate GTPU tunnel encap-fib index against packet */
626  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
627  {
628  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
629  next0 = GTPU_INPUT_NEXT_DROP;
630  goto trace00;
631  }
632 
633  /* Validate GTPU tunnel SIP against packet DIP */
634  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
635  &t0->src.ip6)))
636  goto next00; /* valid packet */
637  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
638  {
639  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
640  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
641  key6_0.teid = gtpu0->teid;
642  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
643  if (PREDICT_TRUE (p0 != NULL))
644  {
645  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
646  goto next00; /* valid packet */
647  }
648  }
649  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
650  next0 = GTPU_INPUT_NEXT_DROP;
651  goto trace00;
652  }
653 
654  next00:
655  /* Pop gtpu header */
656  vlib_buffer_advance (b0, gtpu_hdr_len0);
657 
658  next0 = t0->decap_next_index;
659  sw_if_index0 = t0->sw_if_index;
660  len0 = vlib_buffer_length_in_chain (vm, b0);
661 
662  /* Required to make the l2 tag push / pop code work on l2 subifs */
663  if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
664  vnet_update_l2_len (b0);
665 
666  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
667  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
668  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
669 
670  pkts_decapsulated ++;
671  stats_n_packets += 1;
672  stats_n_bytes += len0;
673 
674  /* Batch stats increment on the same gtpu tunnel so counter
675  is not incremented per packet */
676  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
677  {
678  stats_n_packets -= 1;
679  stats_n_bytes -= len0;
680  if (stats_n_packets)
681  vlib_increment_combined_counter
682  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
683  thread_index, stats_sw_if_index,
684  stats_n_packets, stats_n_bytes);
685  stats_n_packets = 1;
686  stats_n_bytes = len0;
687  stats_sw_if_index = sw_if_index0;
688  }
689 
690  trace00:
691  b0->error = error0 ? node->errors[error0] : 0;
692 
693  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
694  {
695  gtpu_rx_trace_t *tr
696  = vlib_add_trace (vm, node, b0, sizeof (*tr));
697  tr->next_index = next0;
698  tr->error = error0;
699  tr->tunnel_index = tunnel_index0;
700  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
701  }
702  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
703  to_next, n_left_to_next,
704  bi0, next0);
705  }
706 
707  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
708  }
709  /* Do we still need this now that tunnel tx stats is kept? */
710  vlib_node_increment_counter (vm, is_ip4?
711  gtpu4_input_node.index : gtpu6_input_node.index,
712  GTPU_ERROR_DECAPSULATED,
713  pkts_decapsulated);
714 
715  /* Increment any remaining batch stats */
716  if (stats_n_packets)
717  {
718  vlib_increment_combined_counter
719  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
720  thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
721  node->runtime_data[0] = stats_sw_if_index;
722  }
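/*
 * The sw_if_index of the still-open stats batch is parked in
 * node->runtime_data[0], letting the batching state carry over to the
 * next frame dispatched to this node.
 */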
723 
724  return from_frame->n_vectors;
725 }
726 
727 VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
728   vlib_node_runtime_t * node,
729   vlib_frame_t * from_frame)
730 {
731  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
732 }
733 
734 VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
735   vlib_node_runtime_t * node,
736   vlib_frame_t * from_frame)
737 {
738  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
739 }
740 
741 static char * gtpu_error_strings[] = {
742 #define gtpu_error(n,s) s,
743 #include <gtpu/gtpu_error.def>
744 #undef gtpu_error
745 #undef _
746 };
747 
748 VLIB_REGISTER_NODE (gtpu4_input_node) = {
749  .name = "gtpu4-input",
750  /* Takes a vector of packets. */
751  .vector_size = sizeof (u32),
752 
753  .n_errors = GTPU_N_ERROR,
754  .error_strings = gtpu_error_strings,
755 
756  .n_next_nodes = GTPU_INPUT_N_NEXT,
757  .next_nodes = {
758 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
759  foreach_gtpu_input_next
760 #undef _
761  },
762 
763 //temp .format_buffer = format_gtpu_header,
764  .format_trace = format_gtpu_rx_trace,
765  // $$$$ .unformat_buffer = unformat_gtpu_header,
766 };
767 
768 VLIB_REGISTER_NODE (gtpu6_input_node) = {
769  .name = "gtpu6-input",
770  /* Takes a vector of packets. */
771  .vector_size = sizeof (u32),
772 
773  .n_errors = GTPU_N_ERROR,
774  .error_strings = gtpu_error_strings,
775 
776  .n_next_nodes = GTPU_INPUT_N_NEXT,
777  .next_nodes = {
778 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
779  foreach_gtpu_input_next
780 #undef _
781  },
782 
783 //temp .format_buffer = format_gtpu_header,
784  .format_trace = format_gtpu_rx_trace,
785  // $$$$ .unformat_buffer = unformat_gtpu_header,
786 };
787 
788 typedef enum {
789   IP_GTPU_BYPASS_NEXT_DROP,
790   IP_GTPU_BYPASS_NEXT_GTPU,
791   IP_GTPU_BYPASS_N_NEXT,
792 } ip_gtpu_bypass_next_t;
793 
794 always_inline uword
795 ip_gtpu_bypass_inline (vlib_main_t * vm,
796   vlib_node_runtime_t * node,
797   vlib_frame_t * frame,
798   u32 is_ip4)
799 {
800  gtpu_main_t * gtm = &gtpu_main;
801  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
802  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
803  vtep4_key_t last_vtep4; /* last IPv4 address / fib index
804  matching a local VTEP address */
805  vtep6_key_t last_vtep6; /* last IPv6 address / fib index
806  matching a local VTEP address */
807 
808  from = vlib_frame_vector_args (frame);
809  n_left_from = frame->n_vectors;
810  next_index = node->cached_next_index;
811 
812  if (node->flags & VLIB_NODE_FLAG_TRACE)
813  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
814 
815  if (is_ip4)
816  vtep4_key_init (&last_vtep4);
817  else
818  vtep6_key_init (&last_vtep6);
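/*
 * ip4-gtpu-bypass / ip6-gtpu-bypass run as features on the IP unicast
 * arcs: every packet first gets its normal next node via
 * vnet_feature_next, and only UDP packets whose DIP is a local VTEP and
 * whose destination port is 2152 (UDP_DST_PORT_GTPU) are redirected
 * straight to gtpu4-input / gtpu6-input, skipping the full ip-local /
 * udp dispatch path.
 */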
819 
820  while (n_left_from > 0)
821  {
822  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
823 
824  while (n_left_from >= 4 && n_left_to_next >= 2)
825  {
826  vlib_buffer_t * b0, * b1;
827  ip4_header_t * ip40, * ip41;
828  ip6_header_t * ip60, * ip61;
829  udp_header_t * udp0, * udp1;
830  u32 bi0, ip_len0, udp_len0, flags0, next0;
831  u32 bi1, ip_len1, udp_len1, flags1, next1;
832  i32 len_diff0, len_diff1;
833  u8 error0, good_udp0, proto0;
834  u8 error1, good_udp1, proto1;
835 
836  /* Prefetch next iteration. */
837  {
838  vlib_buffer_t * p2, * p3;
839 
840  p2 = vlib_get_buffer (vm, from[2]);
841  p3 = vlib_get_buffer (vm, from[3]);
842 
843  vlib_prefetch_buffer_header (p2, LOAD);
844  vlib_prefetch_buffer_header (p3, LOAD);
845 
846  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
847  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
848  }
849 
850  bi0 = to_next[0] = from[0];
851  bi1 = to_next[1] = from[1];
852  from += 2;
853  n_left_from -= 2;
854  to_next += 2;
855  n_left_to_next -= 2;
856 
857  b0 = vlib_get_buffer (vm, bi0);
858  b1 = vlib_get_buffer (vm, bi1);
859  if (is_ip4)
860  {
861  ip40 = vlib_buffer_get_current (b0);
862  ip41 = vlib_buffer_get_current (b1);
863  }
864  else
865  {
866  ip60 = vlib_buffer_get_current (b0);
867  ip61 = vlib_buffer_get_current (b1);
868  }
869 
870  /* Setup packet for next IP feature */
871  vnet_feature_next(&next0, b0);
872  vnet_feature_next(&next1, b1);
873 
874  if (is_ip4)
875  {
876  /* Treat IP frag packets as "experimental" protocol for now
877  until support of IP frag reassembly is implemented */
878  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
879  proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
880  }
881  else
882  {
883  proto0 = ip60->protocol;
884  proto1 = ip61->protocol;
885  }
886 
887  /* Process packet 0 */
888  if (proto0 != IP_PROTOCOL_UDP)
889  goto exit0; /* not UDP packet */
890 
891  if (is_ip4)
892  udp0 = ip4_next_header (ip40);
893  else
894  udp0 = ip6_next_header (ip60);
895 
896  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
897  goto exit0; /* not GTPU packet */
898 
899  /* Validate DIP against VTEPs */
900  if (is_ip4)
901  {
902  if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
903  goto exit0; /* no local VTEP for GTPU packet */
904  }
905  else
906  {
907  if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
908  goto exit0; /* no local VTEP for GTPU packet */
909  }
910 
911  flags0 = b0->flags;
912  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
913 
914  /* Don't verify UDP checksum for packets with explicit zero checksum. */
915  good_udp0 |= udp0->checksum == 0;
916 
917  /* Verify UDP length */
918  if (is_ip4)
919  ip_len0 = clib_net_to_host_u16 (ip40->length);
920  else
921  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
922  udp_len0 = clib_net_to_host_u16 (udp0->length);
923  len_diff0 = ip_len0 - udp_len0;
924 
925  /* Verify UDP checksum */
926  if (PREDICT_FALSE (!good_udp0))
927  {
928  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
929  {
930  if (is_ip4)
931  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
932  else
933  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
934  good_udp0 =
935  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
936  }
937  }
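/*
 * Checksum policy: a packet with an explicit zero UDP checksum is
 * accepted as-is, and software validation runs only when the driver has
 * not already computed the L4 checksum, keeping the expensive
 * ip4_tcp_udp_validate_checksum call off the fast path.
 */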
938 
939  if (is_ip4)
940  {
941  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
942  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
943  }
944  else
945  {
946  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
947  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
948  }
949 
950  next0 = error0 ?
951  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
952  b0->error = error0 ? error_node->errors[error0] : 0;
953 
954  /* the gtpu-input node expects current_data to point at the GTPU header */
955  if (is_ip4)
956  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
957  else
958  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
959 
960  exit0:
961  /* Process packet 1 */
962  if (proto1 != IP_PROTOCOL_UDP)
963  goto exit1; /* not UDP packet */
964 
965  if (is_ip4)
966  udp1 = ip4_next_header (ip41);
967  else
968  udp1 = ip6_next_header (ip61);
969 
970  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
971  goto exit1; /* not GTPU packet */
972 
973  /* Validate DIP against VTEPs */
974  if (is_ip4)
975  {
976  if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
977  goto exit1; /* no local VTEP for GTPU packet */
978  }
979  else
980  {
981  if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
982  goto exit1; /* no local VTEP for GTPU packet */
983  }
984 
985  flags1 = b1->flags;
986  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
987 
988  /* Don't verify UDP checksum for packets with explicit zero checksum. */
989  good_udp1 |= udp1->checksum == 0;
990 
991  /* Verify UDP length */
992  if (is_ip4)
993  ip_len1 = clib_net_to_host_u16 (ip41->length);
994  else
995  ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
996  udp_len1 = clib_net_to_host_u16 (udp1->length);
997  len_diff1 = ip_len1 - udp_len1;
998 
999  /* Verify UDP checksum */
1000  if (PREDICT_FALSE (!good_udp1))
1001  {
1002  if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1003  {
1004  if (is_ip4)
1005  flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1006  else
1007  flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1008  good_udp1 =
1009  (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1010  }
1011  }
1012 
1013  if (is_ip4)
1014  {
1015  error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1016  error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1017  }
1018  else
1019  {
1020  error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1021  error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1022  }
1023 
1024  next1 = error1 ?
1025  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1026  b1->error = error1 ? error_node->errors[error1] : 0;
1027 
1028  /* the gtpu-input node expects current_data to point at the GTPU header */
1029  if (is_ip4)
1030  vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
1031  else
1032  vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
1033 
1034  exit1:
1035  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1036  to_next, n_left_to_next,
1037  bi0, bi1, next0, next1);
1038  }
1039 
1040  while (n_left_from > 0 && n_left_to_next > 0)
1041  {
1042  vlib_buffer_t * b0;
1043  ip4_header_t * ip40;
1044  ip6_header_t * ip60;
1045  udp_header_t * udp0;
1046  u32 bi0, ip_len0, udp_len0, flags0, next0;
1047  i32 len_diff0;
1048  u8 error0, good_udp0, proto0;
1049 
1050  bi0 = to_next[0] = from[0];
1051  from += 1;
1052  n_left_from -= 1;
1053  to_next += 1;
1054  n_left_to_next -= 1;
1055 
1056  b0 = vlib_get_buffer (vm, bi0);
1057  if (is_ip4)
1058  ip40 = vlib_buffer_get_current (b0);
1059  else
1060  ip60 = vlib_buffer_get_current (b0);
1061 
1062  /* Setup packet for next IP feature */
1063  vnet_feature_next(&next0, b0);
1064 
1065  if (is_ip4)
1066  /* Treat IP4 frag packets as "experimental" protocol for now
1067  until support of IP frag reassembly is implemented */
1068  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1069  else
1070  proto0 = ip60->protocol;
1071 
1072  if (proto0 != IP_PROTOCOL_UDP)
1073  goto exit; /* not UDP packet */
1074 
1075  if (is_ip4)
1076  udp0 = ip4_next_header (ip40);
1077  else
1078  udp0 = ip6_next_header (ip60);
1079 
1080  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
1081  goto exit; /* not GTPU packet */
1082 
1083  /* Validate DIP against VTEPs */
1084  if (is_ip4)
1085  {
1086  if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
1087  goto exit; /* no local VTEP for GTPU packet */
1088  }
1089  else
1090  {
1091  if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
1092  goto exit; /* no local VTEP for GTPU packet */
1093  }
1094 
1095  flags0 = b0->flags;
1096  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1097 
1098  /* Don't verify UDP checksum for packets with explicit zero checksum. */
1099  good_udp0 |= udp0->checksum == 0;
1100 
1101  /* Verify UDP length */
1102  if (is_ip4)
1103  ip_len0 = clib_net_to_host_u16 (ip40->length);
1104  else
1105  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1106  udp_len0 = clib_net_to_host_u16 (udp0->length);
1107  len_diff0 = ip_len0 - udp_len0;
1108 
1109  /* Verify UDP checksum */
1110  if (PREDICT_FALSE (!good_udp0))
1111  {
1112  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1113  {
1114  if (is_ip4)
1115  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1116  else
1117  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1118  good_udp0 =
1119  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1120  }
1121  }
1122 
1123  if (is_ip4)
1124  {
1125  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1126  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1127  }
1128  else
1129  {
1130  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1131  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1132  }
1133 
1134  next0 = error0 ?
1135  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1136  b0->error = error0 ? error_node->errors[error0] : 0;
1137 
1138  /* the gtpu-input node expects current_data to point at the GTPU header */
1139  if (is_ip4)
1140  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1141  else
1142  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1143 
1144  exit:
1145  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1146  to_next, n_left_to_next,
1147  bi0, next0);
1148  }
1149 
1150  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1151  }
1152 
1153  return frame->n_vectors;
1154 }
1155 
1156 VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
1157   vlib_node_runtime_t * node,
1158   vlib_frame_t * frame)
1159 {
1160  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1161 }
1162 
1163 VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
1164  .name = "ip4-gtpu-bypass",
1165  .vector_size = sizeof (u32),
1166 
1167  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1168  .next_nodes = {
1169  [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1170  [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
1171  },
1172 
1173  .format_buffer = format_ip4_header,
1174  .format_trace = format_ip4_forward_next_trace,
1175 };
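/*
 * A minimal sketch of how this node gets onto the datapath, assuming the
 * standard vnet feature-arc API (this mirrors what the gtpu plugin's
 * "set interface ip gtpu-bypass" CLI is expected to do):
 *
 *   vnet_feature_enable_disable ("ip4-unicast", "ip4-gtpu-bypass",
 *                                sw_if_index, 1, 0, 0);
 */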
1176 
1177 #ifndef CLIB_MARCH_VARIANT
1178 /* Dummy init function to get us linked in. */
1179 clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
1180 { return 0; }
1181 
1182 VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
1183 #endif /* CLIB_MARCH_VARIANT */
1184 
1185 VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
1186   vlib_node_runtime_t * node,
1187   vlib_frame_t * frame)
1188 {
1189  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1190 }
1191 
1192 VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
1193  .name = "ip6-gtpu-bypass",
1194  .vector_size = sizeof (u32),
1195 
1196  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1197  .next_nodes = {
1198  [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1199  [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
1200  },
1201 
1202  .format_buffer = format_ip6_header,
1203  .format_trace = format_ip6_forward_next_trace,
1204 };
1205 
1206 #ifndef CLIB_MARCH_VARIANT
1207 /* Dummy init function to get us linked in. */
1208 clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
1209 { return 0; }
1210 
1211 VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
1212 
1213 #define foreach_gtpu_flow_error \
1214  _(NONE, "no error") \
1215  _(PAYLOAD_ERROR, "Payload type errors") \
1216  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
1217  _(IP_HEADER_ERROR, "Rx ip header errors") \
1218  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
1219  _(UDP_LENGTH_ERROR, "Rx udp length errors")
1220 
1221 typedef enum
1222 {
1223 #define _(f,s) GTPU_FLOW_ERROR_##f,
1224   foreach_gtpu_flow_error
1225 #undef _
1226 #define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
1227 #include <gtpu/gtpu_error.def>
1228 #undef gtpu_error
1229   GTPU_FLOW_N_ERROR,
1230 } gtpu_flow_error_t;
1231 
1232 static char *gtpu_flow_error_strings[] = {
1233 #define _(n,s) s,
1234   foreach_gtpu_flow_error
1235 #undef _
1236 #define gtpu_error(n,s) s,
1237 #include <gtpu/gtpu_error.def>
1238 #undef gtpu_error
1239 #undef _
1240 
1241 };
1242 
1243 #define gtpu_local_need_csum_check(_b) \
1244  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED \
1245  || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
1246 
1247 #define gtpu_local_csum_is_valid(_b) \
1248  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT \
1249  || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)
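/*
 * A software checksum check is needed only when neither "checksum
 * already verified by the driver" (L4_CHECKSUM_COMPUTED) nor "checksum
 * offloaded to the NIC" (OFFLOAD_UDP_CKSUM) is set; an offloaded
 * checksum is treated as valid by construction.
 */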
1250 
1251 static_always_inline u8
1252 gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
1253 {
1254  u32 flags = b->flags;
1255  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t)};
1256 
1257  /* Verify UDP checksum */
1258  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1259  {
1260  vlib_buffer_advance (b, -offset);
1261  flags = ip4_tcp_udp_validate_checksum (vm, b);
1262  vlib_buffer_advance (b, offset);
1263  }
1264 
1265  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1266 }
1267 
1268 static_always_inline u8
1269 gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
1270 {
1271  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1272  sizeof(ip4_header_t) - sizeof(udp_header_t);
1273  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1274  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
1275  return ip_len > expected || ip4_hdr->ttl == 0 || ip4_hdr->ip_version_and_header_length != 0x45;
1276 }
1277 
1278 static_always_inline u8
1279 gtpu_check_ip_udp_len (vlib_buffer_t *b)
1280 {
1281  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1282  sizeof(ip4_header_t) - sizeof(udp_header_t);
1283  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
1284  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1285  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
1286  return udp_len > ip_len;
1287 }
1288 
1289 static_always_inline u8
1290 gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
1291 {
1292  u8 error0 = GTPU_FLOW_ERROR_NONE;
1293  if (ip_err0)
1294  error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
1295  if (udp_err0)
1296  error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
1297  if (csum_err0)
1298  error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
1299  return error0;
1300 }
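/*
 * Note the precedence when several checks fail at once: the later
 * assignments win, so a checksum error outranks a UDP length error,
 * which outranks an IP header error.
 */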
1301 
1302 
1303 static uword
1304 gtpu_flow_input (vlib_main_t * vm,
1305   vlib_node_runtime_t * node,
1306   vlib_frame_t * from_frame)
1307 {
1308  u32 n_left_from, next_index, * from, * to_next;
1309  gtpu_main_t * gtm = &gtpu_main;
1310  vnet_main_t * vnm = gtm->vnet_main;
1311  vnet_interface_main_t * im = &vnm->interface_main;
1312  u32 pkts_decapsulated = 0;
1313  u32 thread_index = vlib_get_thread_index();
1314  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
1315  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;
1316 
1317  from = vlib_frame_vector_args (from_frame);
1318  n_left_from = from_frame->n_vectors;
1319 
1320  next_index = node->cached_next_index;
1321  stats_sw_if_index = node->runtime_data[0];
1322  stats_n_packets = stats_n_bytes = 0;
1323 
1324  while (n_left_from > 0)
1325  {
1326  u32 n_left_to_next;
1327 
1328  vlib_get_next_frame (vm, node, next_index,
1329  to_next, n_left_to_next);
1330 
1331  while (n_left_from >= 4 && n_left_to_next >= 2)
1332  {
1333  u32 bi0, bi1;
1334  vlib_buffer_t * b0, * b1;
1335  u32 next0, next1;
1336  gtpu_header_t * gtpu0, * gtpu1;
1337  u32 gtpu_hdr_len0, gtpu_hdr_len1;
1338  u32 tunnel_index0, tunnel_index1;
1339  gtpu_tunnel_t * t0, * t1;
1340  u32 error0, error1;
1341  u32 sw_if_index0, sw_if_index1, len0, len1;
1342  u8 has_space0 = 0, has_space1 = 0;
1343  u8 ver0, ver1;
1344 
1345  /* Prefetch next iteration. */
1346  {
1347  vlib_buffer_t * p2, * p3;
1348 
1349  p2 = vlib_get_buffer (vm, from[2]);
1350  p3 = vlib_get_buffer (vm, from[3]);
1351 
1352  vlib_prefetch_buffer_header (p2, LOAD);
1353  vlib_prefetch_buffer_header (p3, LOAD);
1354 
1355  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1356  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1357  }
1358 
1359  bi0 = from[0];
1360  bi1 = from[1];
1361  to_next[0] = bi0;
1362  to_next[1] = bi1;
1363  from += 2;
1364  to_next += 2;
1365  n_left_to_next -= 2;
1366  n_left_from -= 2;
1367 
1368  b0 = vlib_get_buffer (vm, bi0);
1369  b1 = vlib_get_buffer (vm, bi1);
1370 
1371  /* udp leaves current_data pointing at the gtpu header */
1372  gtpu0 = vlib_buffer_get_current (b0);
1373  gtpu1 = vlib_buffer_get_current (b1);
1374 
1375  len0 = vlib_buffer_length_in_chain (vm, b0);
1376  len1 = vlib_buffer_length_in_chain (vm, b1);
1377 
1378  tunnel_index0 = ~0;
1379  error0 = 0;
1380 
1381  tunnel_index1 = ~0;
1382  error1 = 0;
1383 
1384  ip_err0 = gtpu_check_ip (b0, len0);
1385  udp_err0 = gtpu_check_ip_udp_len (b0);
1386  ip_err1 = gtpu_check_ip (b1, len1);
1387  udp_err1 = gtpu_check_ip_udp_len (b1);
1388 
1389  if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
1390  csum_err0 = !gtpu_validate_udp_csum (vm, b0);
1391  else
1392  csum_err0 = !gtpu_local_csum_is_valid (b0);
1393  if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
1394  csum_err1 = !gtpu_validate_udp_csum (vm, b1);
1395  else
1396  csum_err1 = !gtpu_local_csum_is_valid (b1);
1397 
1398  if (ip_err0 || udp_err0 || csum_err0)
1399  {
1400  next0 = GTPU_INPUT_NEXT_DROP;
1401  error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
1402  goto trace0;
1403  }
1404 
1405  /* speculatively load gtp header version field */
1406  ver0 = gtpu0->ver_flags;
1407 
1408  /*
1409  * Manipulate gtpu header
1410  * TBD: Manipulate Sequence Number and N-PDU Number
1411  * TBD: Manipulate Next Extension Header
1412  */
1413  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
1414 
1415  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
1416  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
1417  {
1418  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1419  next0 = GTPU_INPUT_NEXT_DROP;
1420  goto trace0;
1421  }
1422 
1423  /* Manipulate packet 0 */
1424  ASSERT (b0->flow_id != 0);
1425  tunnel_index0 = b0->flow_id - gtm->flow_id_start;
1426  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1427  b0->flow_id = 0;
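/*
 * gtpu4-flow-input only sees packets that a NIC flow rule has already
 * classified: b0->flow_id carries the id the gtpu plugin's flow-offload
 * path presumably programmed as flow_id_start + tunnel pool index, so
 * the owning tunnel falls out of a single subtraction with no hash
 * lookup; flow_id is then cleared so downstream nodes do not reuse it.
 */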
1428 
1429  /* Pop gtpu header */
1430  vlib_buffer_advance (b0, gtpu_hdr_len0);
1431 
1432  /* assign the next node */
1433  if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1434  (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1435  {
1436  error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1437  next0 = GTPU_INPUT_NEXT_DROP;
1438  goto trace0;
1439  }
1440  next0 = t0->decap_next_index;
1441 
1442  sw_if_index0 = t0->sw_if_index;
1443 
1444  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1445  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1446 
1447  pkts_decapsulated ++;
1448  stats_n_packets += 1;
1449  stats_n_bytes += len0;
1450 
1451  /* Batch stats increment on the same gtpu tunnel so counter
1452  is not incremented per packet */
1453  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1454  {
1455  stats_n_packets -= 1;
1456  stats_n_bytes -= len0;
1457  if (stats_n_packets)
1458  vlib_increment_combined_counter
1459  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1460  thread_index, stats_sw_if_index,
1461  stats_n_packets, stats_n_bytes);
1462  stats_n_packets = 1;
1463  stats_n_bytes = len0;
1464  stats_sw_if_index = sw_if_index0;
1465  }
1466 
1467 trace0:
1468  b0->error = error0 ? node->errors[error0] : 0;
1469 
1470  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1471  {
1472  gtpu_rx_trace_t *tr
1473  = vlib_add_trace (vm, node, b0, sizeof (*tr));
1474  tr->next_index = next0;
1475  tr->error = error0;
1476  tr->tunnel_index = tunnel_index0;
1477  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1478  }
1479 
1480  if (ip_err1 || udp_err1 || csum_err1)
1481  {
1482  next1 = GTPU_INPUT_NEXT_DROP;
1483  error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
1484  goto trace1;
1485  }
1486 
1487  /* speculatively load gtp header version field */
1488  ver1 = gtpu1->ver_flags;
1489 
1490  /*
1491  * Manipulate gtpu header
1492  * TBD: Manipulate Sequence Number and N-PDU Number
1493  * TBD: Manipulate Next Extension Header
1494  */
1495  gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
1496  has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
1497  if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
1498  {
1499  error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1500  next1 = GTPU_INPUT_NEXT_DROP;
1501  goto trace1;
1502  }
1503 
1504  /* Manipulate packet 1 */
1505  ASSERT (b1->flow_id != 0);
1506  tunnel_index1 = b1->flow_id - gtm->flow_id_start;
1507  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
1508  b1->flow_id = 0;
1509 
1510  /* Pop gtpu header */
1511  vlib_buffer_advance (b1, gtpu_hdr_len1);
1512 
1513  /* assign the next node */
1514  if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1515  (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1516  {
1517  error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1518  next1 = GTPU_INPUT_NEXT_DROP;
1519  goto trace1;
1520  }
1521  next1 = t1->decap_next_index;
1522 
1523  sw_if_index1 = t1->sw_if_index;
1524 
1525  /* Required to make the l2 tag push / pop code work on l2 subifs */
1526  /* This cannot happen in the current implementation, as only
1527  ipv4/udp/gtpu/ipv4 packets can be matched by the installed flow rules */
1528  if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
1529  vnet_update_l2_len (b1);
1530 
1531  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1532  vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
1533 
1534  pkts_decapsulated ++;
1535  stats_n_packets += 1;
1536  stats_n_bytes += len1;
1537 
1538  /* Batch stats increment on the same gtpu tunnel so counter
1539  is not incremented per packet */
1540  if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
1541  {
1542  stats_n_packets -= 1;
1543  stats_n_bytes -= len1;
1544  if (stats_n_packets)
1545  vlib_increment_combined_counter
1546  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1547  thread_index, stats_sw_if_index,
1548  stats_n_packets, stats_n_bytes);
1549  stats_n_packets = 1;
1550  stats_n_bytes = len1;
1551  stats_sw_if_index = sw_if_index1;
1552  }
1553 
1554 trace1:
1555  b1->error = error1 ? node->errors[error1] : 0;
1556 
1557  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
1558  {
1559  gtpu_rx_trace_t *tr
1560  = vlib_add_trace (vm, node, b1, sizeof (*tr));
1561  tr->next_index = next1;
1562  tr->error = error1;
1563  tr->tunnel_index = tunnel_index1;
1564  tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
1565  }
1566 
1567  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1568  to_next, n_left_to_next,
1569  bi0, bi1, next0, next1);
1570  }
1571 
1572  while (n_left_from > 0 && n_left_to_next > 0)
1573  {
1574  u32 bi0;
1575  vlib_buffer_t * b0;
1576  u32 next0;
1577  gtpu_header_t * gtpu0;
1578  u32 gtpu_hdr_len0;
1579  u32 error0;
1580  u32 tunnel_index0;
1581  gtpu_tunnel_t * t0;
1582  u32 sw_if_index0, len0;
1583  u8 has_space0 = 0;
1584  u8 ver0;
1585 
1586  bi0 = from[0];
1587  to_next[0] = bi0;
1588  from += 1;
1589  to_next += 1;
1590  n_left_from -= 1;
1591  n_left_to_next -= 1;
1592 
1593  b0 = vlib_get_buffer (vm, bi0);
1594  len0 = vlib_buffer_length_in_chain (vm, b0);
1595 
1596  tunnel_index0 = ~0;
1597  error0 = 0;
1598 
1599  ip_err0 = gtpu_check_ip (b0, len0);
1600  udp_err0 = gtpu_check_ip_udp_len (b0);
1601  if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
1602  csum_err0 = !gtpu_validate_udp_csum (vm, b0);
1603  else
1604  csum_err0 = !gtpu_local_csum_is_valid (b0);
1605 
1606  if (ip_err0 || udp_err0 || csum_err0)
1607  {
1608  next0 = GTPU_INPUT_NEXT_DROP;
1609  error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
1610  goto trace00;
1611  }
1612 
1613  /* udp leaves current_data pointing at the gtpu header */
1614  gtpu0 = vlib_buffer_get_current (b0);
1615 
1616  /* speculatively load gtp header version field */
1617  ver0 = gtpu0->ver_flags;
1618 
1619  /*
1620  * Manipulate gtpu header
1621  * TBD: Manipulate Sequence Number and N-PDU Number
1622  * TBD: Manipulate Next Extension Header
1623  */
1624  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
1625 
1626  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
1627  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
1628  {
1629  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1630  next0 = GTPU_INPUT_NEXT_DROP;
1631  goto trace00;
1632  }
1633 
1634  ASSERT (b0->flow_id != 0);
1635  tunnel_index0 = b0->flow_id - gtm->flow_id_start;
1636  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1637  b0->flow_id = 0;
1638 
1639  /* Pop gtpu header */
1640  vlib_buffer_advance (b0, gtpu_hdr_len0);
1641 
1642  /* assign the next node */
1643  if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1644  (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1645  {
1646  error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1647  next0 = GTPU_INPUT_NEXT_DROP;
1648  goto trace00;
1649  }
1650  next0 = t0->decap_next_index;
1651 
1652  sw_if_index0 = t0->sw_if_index;
1653 
1654  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1655  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1656 
1657  pkts_decapsulated ++;
1658  stats_n_packets += 1;
1659  stats_n_bytes += len0;
1660 
1661  /* Batch stats increment on the same gtpu tunnel so counter
1662  is not incremented per packet */
1663  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1664  {
1665  stats_n_packets -= 1;
1666  stats_n_bytes -= len0;
1667  if (stats_n_packets)
1668  vlib_increment_combined_counter
1669  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1670  thread_index, stats_sw_if_index,
1671  stats_n_packets, stats_n_bytes);
1672  stats_n_packets = 1;
1673  stats_n_bytes = len0;
1674  stats_sw_if_index = sw_if_index0;
1675  }
1676  trace00:
1677  b0->error = error0 ? node->errors[error0] : 0;
1678 
1679  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1680  {
1681  gtpu_rx_trace_t *tr
1682  = vlib_add_trace (vm, node, b0, sizeof (*tr));
1683  tr->next_index = next0;
1684  tr->error = error0;
1685  tr->tunnel_index = tunnel_index0;
1686  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1687  }
1688  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1689  to_next, n_left_to_next,
1690  bi0, next0);
1691  }
1692 
1693  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1694  }
1695 
1696  /* Do we still need this now that tunnel tx stats is kept? */
1697  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
1698  GTPU_ERROR_DECAPSULATED,
1699  pkts_decapsulated);
1700 
1701  /* Increment any remaining batch stats */
1702  if (stats_n_packets)
1703  {
1704  vlib_increment_combined_counter
1705  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1706  thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
1707  node->runtime_data[0] = stats_sw_if_index;
1708  }
1709 
1710  return from_frame->n_vectors;
1711 }
1712 
1713 VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
1714   vlib_node_runtime_t * node,
1715   vlib_frame_t * from_frame)
1716 {
1717  return gtpu_flow_input(vm, node, from_frame);
1718 }
1719 
1720 
1721 /* *INDENT-OFF* */
1722 #ifndef CLIB_MULTIARCH_VARIANT
1723 VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
1724  .name = "gtpu4-flow-input",
1725  .type = VLIB_NODE_TYPE_INTERNAL,
1726  .vector_size = sizeof (u32),
1727 
1728  .format_trace = format_gtpu_rx_trace,
1729 
1730  .n_errors = GTPU_FLOW_N_ERROR,
1731  .error_strings = gtpu_flow_error_strings,
1732 
1733  .n_next_nodes = GTPU_INPUT_N_NEXT,
1734  .next_nodes = {
1735 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
1736  foreach_gtpu_input_next
1737 #undef _
1738 
1739  },
1740 };
1741 #endif
1742 /* *INDENT-ON* */
1743 
1744 #endif /* CLIB_MARCH_VARIANT */