FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
load_balance_map.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @brief
17  */
18 #include <vnet/fib/fib_path.h>
19 #include <vnet/fib/fib_node_list.h>
21 #include <vnet/dpo/load_balance.h>
22 
23 /**
24  * A hash-table of load-balance maps by path index.
25  * this provides the fast lookup of the LB map when a path goes down
26  */
28 
29 /**
30  * A hash-table of load-balance maps by set of paths.
31  * This provides the LB map sharing.
32  * LB maps do not necessarily use all the paths in the list, since
33  * the entry that is requesting the map, may not have an out-going
34  * label for each of the paths.
35  */
37 
39 {
42 } __attribute__ ((packed)) load_balance_map_path_flags_t;
43 
44 typedef struct load_balance_map_path_t_ {
45  /**
46  * Index of the path
47  */
49 
50  /**
51  * Sibling Index in the list of all maps with this path index
52  */
54 
55  /**
56  * The normalised weight of the path
57  */
59 
60  /**
61  * The state of the path
62  */
65 
66 /**
67  * The global pool of LB maps
68  */
70 
71 /**
72  * the logger
73  */
75 
76 /*
77  * Debug macro
78  */
79 #define LOAD_BALANCE_MAP_DBG(_pl, _fmt, _args...) \
80 { \
81  vlib_log_debug(load_balance_map_logger, \
82  "lbm:" _fmt, \
83  ##_args); \
84 }
85 
86 static index_t
88 {
89  return (lbm - load_balance_map_pool);
90 }
91 
92 u8*
93 format_load_balance_map (u8 *s, va_list * ap)
94 {
95  index_t lbmi = va_arg(*ap, index_t);
96  u32 indent = va_arg(*ap, u32);
97  load_balance_map_t *lbm;
98  u32 n_buckets, ii;
99 
100  lbm = load_balance_map_get(lbmi);
101  n_buckets = vec_len(lbm->lbm_buckets);
102 
103  s = format(s, "load-balance-map: index:%d buckets:%d", lbmi, n_buckets);
104  s = format(s, "\n%U index:", format_white_space, indent+2);
105  for (ii = 0; ii < n_buckets; ii++)
106  {
107  s = format(s, "%5d", ii);
108  }
109  s = format(s, "\n%U map:", format_white_space, indent+2);
110  for (ii = 0; ii < n_buckets; ii++)
111  {
112  s = format(s, "%5d", lbm->lbm_buckets[ii]);
113  }
114 
115  return (s);
116 }
117 
118 
119 static uword
121 {
122  u32 old_lbm_hash, new_lbm_hash, hash;
123  load_balance_map_path_t *lb_path;
124 
125  new_lbm_hash = old_lbm_hash = vec_len(lbm->lbm_paths);
126 
127  vec_foreach (lb_path, lbm->lbm_paths)
128  {
129  hash = lb_path->lbmp_index;
130  hash_mix32(hash, old_lbm_hash, new_lbm_hash);
131  }
132 
133  return (new_lbm_hash);
134 }
135 
138 {
139  return 1 + 2*index;
140 }
141 
144 {
145  return key & 1;
146 }
147 
150 {
152  return key / 2;
153 }
154 
155 static load_balance_map_t*
157 {
158  load_balance_map_t *lbm;
159 
161  {
162  index_t lbm_index;
163 
164  lbm_index = load_balance_map_db_hash_key_2_index(key);
165  lbm = load_balance_map_get(lbm_index);
166  }
167  else
168  {
169  lbm = uword_to_pointer (key, load_balance_map_t *);
170  }
171 
172  return (lbm);
173 }
174 
175 static uword
177  uword key)
178 {
179  load_balance_map_t *lbm;
180 
182 
183  return (load_balance_map_hash(lbm));
184 }
185 
186 static uword
188  uword key1,
189  uword key2)
190 {
191  load_balance_map_t *lbm1, *lbm2;
192 
195 
196  return (load_balance_map_hash(lbm1) ==
197  load_balance_map_hash(lbm2));
198 }
199 
200 static index_t
202 {
203  uword *p;
204 
205  p = hash_get(load_balance_map_db, lbm);
206 
207  if (NULL != p)
208  {
209  return p[0];
210  }
211 
212  return (FIB_NODE_INDEX_INVALID);
213 }
214 
215 static void
217 {
219  fib_node_list_t list;
220  uword *p;
221 
223 
224  /*
225  * insert into the DB based on the set of paths.
226  */
227  hash_set (load_balance_map_db,
231 
232  /*
233  * insert into each per-path list.
234  */
235  vec_foreach(lbmp, lbm->lbm_paths)
236  {
237  p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
238 
239  if (NULL == p)
240  {
241  list = fib_node_list_create();
242  hash_set(lb_maps_by_path_index, lbmp->lbmp_index, list);
243  }
244  else
245  {
246  list = p[0];
247  }
248 
249  lbmp->lbmp_sibling =
253  }
254 
255  LOAD_BALANCE_MAP_DBG(lbm, "DB-inserted");
256 }
257 
258 static void
260 {
262  uword *p;
263 
265 
266  hash_unset(load_balance_map_db,
269 
270  /*
271  * remove from each per-path list.
272  */
273  vec_foreach(lbmp, lbm->lbm_paths)
274  {
275  p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
276 
277  ALWAYS_ASSERT(NULL != p);
278 
279  fib_node_list_remove(p[0], lbmp->lbmp_sibling);
280  }
281 
282  LOAD_BALANCE_MAP_DBG(lbm, "DB-removed");
283 }
284 
285 /**
286  * @brief from the paths that are usable, fill the Map.
287  */
288 static void
290 {
292  u32 n_buckets, bucket, ii, jj;
293  u16 *tmp_buckets;
294 
295  tmp_buckets = NULL;
296  n_buckets = vec_len(lbm->lbm_buckets);
297 
298  /*
299  * run through the set of paths once, and build a vector of the
300  * indices that are usable. We do this in a scratch space, since we
301  * need to refer to it multiple times as we build the real buckets.
302  */
303  vec_validate(tmp_buckets, n_buckets-1);
304 
305  bucket = jj = 0;
306  vec_foreach (lbmp, lbm->lbm_paths)
307  {
308  if (fib_path_is_resolved(lbmp->lbmp_index))
309  {
310  for (ii = 0; ii < lbmp->lbmp_weight; ii++)
311  {
312  tmp_buckets[jj++] = bucket++;
313  }
314  }
315  else
316  {
317  bucket += lbmp->lbmp_weight;
318  }
319  }
320  _vec_len(tmp_buckets) = jj;
321 
322  /*
323  * If the number of temporaries written is as many as we need, implying
324  * all paths were up, then we can simply copy the scratch area over the
325  * actual buckets' memory
326  */
327  if (jj == n_buckets)
328  {
329  memcpy(lbm->lbm_buckets,
330  tmp_buckets,
331  sizeof(lbm->lbm_buckets[0]) * n_buckets);
332  }
333  else
334  {
335  /*
336  * one or more paths are down.
337  */
338  if (0 == vec_len(tmp_buckets))
339  {
340  /*
341  * if the scratch area is empty, then no paths are usable.
342  * they will all drop. so use them all, lest we account drops
343  * against only one.
344  */
345  for (bucket = 0; bucket < n_buckets; bucket++)
346  {
347  lbm->lbm_buckets[bucket] = bucket;
348  }
349  }
350  else
351  {
352  bucket = jj = 0;
353  vec_foreach (lbmp, lbm->lbm_paths)
354  {
355  if (fib_path_is_resolved(lbmp->lbmp_index))
356  {
357  for (ii = 0; ii < lbmp->lbmp_weight; ii++)
358  {
359  lbm->lbm_buckets[bucket] = bucket;
360  bucket++;
361  }
362  }
363  else
364  {
365  /*
366  * path is unusable
367  * cycle through the scratch space selecting a index.
368  * this means we load balance, in the intended ratio,
369  * over the paths that are still usable.
370  */
371  for (ii = 0; ii < lbmp->lbmp_weight; ii++)
372  {
373  lbm->lbm_buckets[bucket] = tmp_buckets[jj];
374  jj = (jj + 1) % vec_len(tmp_buckets);
375  bucket++;
376  }
377  }
378  }
379  }
380  }
381 
382  vec_free(tmp_buckets);
383 }
384 
385 static load_balance_map_t*
387 {
388  load_balance_map_t *lbm;
389  u32 ii;
390  vlib_main_t *vm;
391  u8 did_barrier_sync;
392 
393  dpo_pool_barrier_sync (vm, load_balance_map_pool, did_barrier_sync);
394  pool_get_aligned(load_balance_map_pool, lbm, CLIB_CACHE_LINE_BYTES);
395  dpo_pool_barrier_release (vm, did_barrier_sync);
396 
397  clib_memset(lbm, 0, sizeof(*lbm));
398 
399  vec_validate(lbm->lbm_paths, vec_len(paths)-1);
400 
401  vec_foreach_index(ii, paths)
402  {
403  lbm->lbm_paths[ii].lbmp_index = paths[ii].path_index;
404  lbm->lbm_paths[ii].lbmp_weight = paths[ii].path_weight;
405  }
406 
407  return (lbm);
408 }
409 
410 static load_balance_map_t *
412  u32 n_buckets,
413  u32 sum_of_weights)
414 {
415  lbm->lbm_sum_of_norm_weights = sum_of_weights;
416  vec_validate(lbm->lbm_buckets, n_buckets-1);
417 
419 
421 
422  load_balance_map_logger =
423  vlib_log_register_class ("dpo", "load-balance-map");
424 
425  return (lbm);
426 }
427 
428 static void
430 {
431  vec_free(lbm->lbm_paths);
432  vec_free(lbm->lbm_buckets);
433  pool_put(load_balance_map_pool, lbm);
434 }
435 
436 index_t
438  u32 sum_of_weights,
439  const load_balance_path_t *paths)
440 {
441  load_balance_map_t *tmp, *lbm;
442  index_t lbmi;
443 
444  tmp = load_balance_map_alloc(paths);
445 
446  lbmi = load_balance_map_db_find(tmp);
447 
448  if (INDEX_INVALID == lbmi)
449  {
450  lbm = load_balance_map_init(tmp, n_buckets, sum_of_weights);
451  }
452  else
453  {
454  lbm = load_balance_map_get(lbmi);
456  }
457 
458  lbm->lbm_locks++;
459 
460  return (load_balance_map_get_index(lbm));
461 }
462 
463 void
465 {
466  load_balance_map_t *lbm;
467 
468  lbm = load_balance_map_get(lbmi);
469 
470  lbm->lbm_locks++;
471 }
472 
473 void
475 {
476  load_balance_map_t *lbm;
477 
478  if (INDEX_INVALID == lbmi)
479  {
480  return;
481  }
482 
483  lbm = load_balance_map_get(lbmi);
484 
485  lbm->lbm_locks--;
486 
487  if (0 == lbm->lbm_locks)
488  {
491  }
492 }
493 
494 static int
496  void *ctx)
497 {
498  load_balance_map_t *lbm;
499 
500  lbm = load_balance_map_get(fptr->fnp_index);
501 
503 
504  return (!0);
505 }
506 
507 /**
508  * @brief the state of a path has changed (it has no doubt gone down).
509  * This is the trigger to perform a PIC edge cutover and update the maps
510  * to exclude this path.
511  */
512 void
514 {
515  uword *p;
516 
517  /*
518  * re-stripe the buckets for each affected MAP
519  */
520  p = hash_get(lb_maps_by_path_index, path_index);
521 
522  if (NULL == p)
523  return;
524 
526 }
527 
528 /**
529  * @brief Make/add a new or lock an existing Load-balance map
530  */
531 void
533 {
534  load_balance_map_db =
535  hash_create2 (/* elts */ 0,
536  /* user */ 0,
537  /* value_bytes */ sizeof (index_t),
540  /* format pair/arg */
541  0, 0);
542 
543  lb_maps_by_path_index = hash_create(0, sizeof(fib_node_list_t));
544 }
545 
546 void
548 {
549  fib_show_memory_usage("Load-Balance Map",
550  pool_elts(load_balance_map_pool),
551  pool_len(load_balance_map_pool),
552  sizeof(load_balance_map_t));
553 }
554 
555 static clib_error_t *
557  unformat_input_t * input,
558  vlib_cli_command_t * cmd)
559 {
560  index_t lbmi = INDEX_INVALID;
561 
563  {
564  if (unformat (input, "%d", &lbmi))
565  ;
566  else
567  break;
568  }
569 
570  if (INDEX_INVALID != lbmi)
571  {
572  vlib_cli_output (vm, "%U", format_load_balance_map, lbmi, 0);
573  }
574  else
575  {
576  load_balance_map_t *lbm;
577 
578  pool_foreach(lbm, load_balance_map_pool,
579  ({
582  }));
583  }
584 
585  return 0;
586 }
587 
/**
 * CLI registration for "show load-balance-map [<index>]".
 * With an index, dumps that one map; with no argument, walks the whole
 * load_balance_map_pool (see load_balance_map_show above).
 */
VLIB_CLI_COMMAND (load_balance_map_show_command, static) = {
 .path = "show load-balance-map",
 .short_help = "show load-balance-map [<index>]",
 .function = load_balance_map_show,
};
vlib_log_class_t vlib_log_register_class(char *class, char *subclass)
Definition: log.c:176
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:439
int fib_path_is_resolved(fib_node_index_t path_index)
Definition: fib_path.c:2684
void load_balance_map_unlock(index_t lbmi)
#define vec_foreach_index(var, v)
Iterate over vector indices.
struct load_balance_map_path_t_ * lbm_paths
the vector of paths this MAP represents
index_t load_balance_map_add_or_lock(u32 n_buckets, u32 sum_of_weights, const load_balance_path_t *paths)
vlib_log_class_t load_balance_map_logger
the logger
#define hash_set(h, key, value)
Definition: hash.h:255
fib_node_index_t path_index
The index of the FIB path.
Definition: load_balance.h:71
static index_t load_balance_map_get_index(load_balance_map_t *lbm)
static uword load_balance_map_db_hash_key_2_index(uword key)
#define hash_unset(h, key)
Definition: hash.h:261
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
void load_balance_map_path_state_change(fib_node_index_t path_index)
the state of a path has changed (it has no doubt gone down).
u32 index_t
A Data-Path Object is an object that represents actions that are applied to packets are they are swit...
Definition: dpo.h:41
enum load_balance_map_path_flags_t_ load_balance_map_path_flags_t
static int load_balance_map_path_state_change_walk(fib_node_ptr_t *fptr, void *ctx)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
void fib_node_list_walk(fib_node_list_t list, fib_node_list_walk_cb_t fn, void *args)
Walk the list of node.
unsigned char u8
Definition: types.h:56
#define pool_len(p)
Number of elements in pool vector.
Definition: pool.h:140
static load_balance_map_t * load_balance_map_get(index_t lbmi)
static index_t load_balance_map_db_find(load_balance_map_t *lbm)
u32 vlib_log_class_t
Definition: vlib.h:51
fib_node_index_t fnp_index
node's index
Definition: fib_node.h:197
void fib_node_list_remove(fib_node_list_t list, u32 sibling)
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:493
void load_balance_map_module_init(void)
Make/add a new or lock an existing Load-balance map.
#define always_inline
Definition: clib.h:99
static uword load_balance_map_db_hash_key_is_index(uword key)
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static uword load_balance_map_db_hash_key_sum(hash_t *h, uword key)
u32 lbm_sum_of_norm_weights
the sum of the normalised weights.
fib_node_index_t lbmp_index
Index of the path.
void fib_show_memory_usage(const char *name, u32 in_use_elts, u32 allocd_elts, size_t size_elt)
Show the memory usage for a type.
Definition: fib_node.c:220
static uword load_balance_map_db_hash_key_from_index(uword index)
#define ALWAYS_ASSERT(truth)
unsigned int u32
Definition: types.h:88
A representation of one pointer to another node.
Definition: fib_node.h:189
load_balance_map_t * load_balance_map_pool
The global pool of LB maps.
#define hash_get(h, key)
Definition: hash.h:249
long ctx[MAX_CONNS]
Definition: main.c:144
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:286
static clib_error_t * load_balance_map_show(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
#define LOAD_BALANCE_MAP_DBG(_pl, _fmt, _args...)
u32 fib_node_list_push_front(fib_node_list_t list, int owner_id, fib_node_type_t type, fib_node_index_t index)
Insert an element at the from of the list.
u8 * format_load_balance_map(u8 *s, va_list *ap)
#define dpo_pool_barrier_release(VM, YESNO)
Release barrier sync after dpo pool expansion.
Definition: dpo.h:542
struct load_balance_map_path_t_ load_balance_map_path_t
load_balance_map_path_flags_t lbmp_flags
The sate of the path.
void load_balance_map_lock(index_t lbmi)
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
Definition: pool.h:230
static load_balance_map_t * load_balance_map_db_get_from_hash_key(uword key)
#define UNFORMAT_END_OF_INPUT
Definition: format.h:145
load_balance_map_path_flags_t_
static uword load_balance_map_db_hash_key_equal(hash_t *h, uword key1, uword key2)
vlib_main_t * vm
Definition: buffer.c:323
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:341
void load_balance_map_show_mem(void)
static uword * lb_maps_by_path_index
A hash-table of load-balance maps by path index.
#define hash_mix32(a0, b0, c0)
Definition: hash.h:539
static void load_balance_map_db_insert(load_balance_map_t *lbm)
static load_balance_map_t * load_balance_map_alloc(const load_balance_path_t *paths)
u32 fib_node_index_t
A typedef of a node index.
Definition: fib_types.h:30
static void load_balance_map_destroy(load_balance_map_t *lbm)
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:161
#define hash_create(elts, value_bytes)
Definition: hash.h:696
#define uword_to_pointer(u, type)
Definition: types.h:136
#define ASSERT(truth)
fib_node_list_t fib_node_list_create(void)
Create a new node list.
u16 * lbm_buckets
The buckets of the map that provide the index to index translation.
static void load_balance_map_db_remove(load_balance_map_t *lbm)
static load_balance_map_t * load_balance_map_init(load_balance_map_t *lbm, u32 n_buckets, u32 sum_of_weights)
vl_api_mfib_path_t paths[n_paths]
Definition: ip.api:458
u32 lbmp_weight
the normalised weight of the path
#define hash_create2(_elts, _user, _value_bytes, _key_sum, _key_equal, _format_pair, _format_pair_arg)
Definition: hash.h:494
#define FIB_NODE_INDEX_INVALID
Definition: fib_types.h:31
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 path_weight
weight for the path.
Definition: load_balance.h:76
#define INDEX_INVALID
Invalid index - used when no index is known blazoned capitals INVALID speak volumes where ~0 does not...
Definition: dpo.h:47
u64 uword
Definition: types.h:112
typedef key
Definition: ipsec.api:247
fib_node_index_t lbmp_sibling
Sibling Index in the list of all maps with this path index.
One path from an [EU]CMP set that the client wants to add to a load-balance object.
Definition: load_balance.h:62
#define dpo_pool_barrier_sync(VM, P, YESNO)
Barrier sync if a dpo pool is about to expand.
Definition: dpo.h:518
u32 fib_node_list_t
A list of FIB nodes.
Definition: fib_node.h:203
static uword load_balance_map_hash(load_balance_map_t *lbm)
#define vec_foreach(var, vec)
Vector iterator.
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static uword * load_balance_map_db
A hash-table of load-balance maps by set of paths.
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:772
u32 lbm_locks
Number of locks.
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:171
static void load_balance_map_fill(load_balance_map_t *lbm)
from the paths that are usable, fill the Map.
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128