  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (rv >= alloc_arena_size (h))
    os_out_of_memory ();

  return (void *) (uword) (rv + alloc_arena (h));
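/*
 * The lines above are the tail of BV (alloc_aligned): a simple bump
 * allocator that advances alloc_arena_next by the requested size, gives up
 * via os_out_of_memory () when the arena is exhausted, and returns the
 * arena base plus the previous offset as the new chunk.
 */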
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  ASSERT (memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  alloc_arena_next (h) = 0;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
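/*
 * Usage sketch for BV (clib_bihash_init), assuming the 8_8 template
 * instance (u64 key, u64 value) shipped with vppinfra; the table name,
 * bucket count and arena size below are illustrative, and the demo_* names
 * are hypothetical.
 */
#include <vppinfra/bihash_8_8.h>

static clib_bihash_8_8_t demo_table;

static void
demo_table_init (void)
{
  clib_bihash_init_8_8 (&demo_table, "demo", 2048 /* nbuckets */,
                        64 << 20 /* arena size, bytes */);
}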
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

void BV (clib_bihash_master_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  ASSERT (memory_size < (1ULL << 32));

  if (ftruncate (fd, memory_size) < 0)
    {
      /* ... */
    }

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      /* ... */
    }

  h->sh = (void *) mmap_addr;

  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;

  bucket_size = nbuckets * sizeof (h->buckets[0]);

  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    BV (clib_bihash_get_offset) (h, (void *) (h->alloc_lock));
  freelist_vh =
    BV (alloc_aligned) (h,
                        sizeof (vec_header_t) +
                        BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;

  h->sh->freelists_as_u64 =
    BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);
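/*
 * BV (clib_bihash_master_init_svm) backs both the shared header and the
 * key/value arena with a single memfd mapping, so that cooperating
 * processes can attach to the same table; the pointers published in the
 * shared header (alloc_lock_as_u64, freelists_as_u64) are stored as arena
 * offsets rather than raw addresses.
 */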
void BV (clib_bihash_slave_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  BVT (clib_bihash_shared_header) * sh;

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    {
      /* ... */
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      /* ... */
    }

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;

  h->name = (u8 *) name;

  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);
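/*
 * The slave side maps a single page first, just to read alloc_arena_size
 * out of the shared header, then re-maps the full arena read/write and
 * inherits nbuckets from the master.
 */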
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
                                         format_function_t * fmt_fn)

/* In BV (clib_bihash_free): unmap or close the backing store */
#if BIHASH_32_64_SVM == 0
  /* ... */
#else
  (void) close (h->memfd);
#endif
  /* In BV (value_alloc): */
  ASSERT (h->alloc_lock[0]);

  h->freelists[log2_pages] = rv->next_free_as_u64;

  /* In BV (value_free): */
  ASSERT (h->alloc_lock[0]);
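/*
 * Value pages are recycled through h->freelists, one list per log2 page
 * count: BV (value_free) pushes a page group onto freelists[log2_pages]
 * and BV (value_alloc) pops one off, falling back to the arena allocator
 * when the list is empty. Both run under the allocator lock, hence the
 * ASSERT (h->alloc_lock[0]) checks.
 */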
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      /* ... */
    }

  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;
    }

  working_bucket.as_u64 = b->as_u64;

  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
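/*
 * make_working_copy(): the writer snapshots the bucket (h->saved_bucket),
 * copies its value pages into a per-thread working copy (grown via the
 * arena allocator whenever the bucket has outgrown the previous copy) and
 * points the bucket at that copy while the update is prepared.
 */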
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)

  /* Entry not in use? Forget it */
  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

  /* Rehash the key to pick its new home page */
  new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v = &new_values[new_hash];

  /* Copy into a free slot on the new home page */
  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
    clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                      sizeof (new_v->kvp[j]));

  /* No free slot on the new home page: free it, the caller will retry */
  BV (value_free) (h, new_values, new_log2_pages);
static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  /* Across the old value array */
  for (i = 0; i < old_length; i++)

  /* Find a free slot in the new linear key/value space */
  for (; j < new_length; j++)

  /* Old value not in use? Forget it */
  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

  /* New value should never be in use */
  if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
    clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                      sizeof (new_values->kvp[j]));

  /* Ran out of room in the new linear space: free it, the caller retries */
  BV (value_free) (h, new_values, new_log2_pages);
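/*
 * Both rehash helpers run under the allocator lock. split_and_rehash
 * distributes the old entries across twice as many home pages, while
 * split_and_rehash_linear packs them into one linearly searched page
 * array; when an old entry cannot be placed, the freshly allocated pages
 * are freed so the caller can retry with more pages or fall back to the
 * linear layout.
 */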
static inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  u32 new_log2_pages, old_log2_pages;
  int mark_bucket_linear;

  hash = BV (clib_bihash_hash) (add_v);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  hash >>= h->log2_nbuckets;

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BV (clib_bihash_bucket_is_empty) (b))

  /* ... not adding? nothing to delete, unlock and bail out ... */
  BV (clib_bihash_unlock_bucket) (b);

  /* ... adding: allocate the bucket's first value page ... */
  BV (clib_bihash_alloc_lock) (h);
  v = BV (value_alloc) (h, 0);
  BV (clib_bihash_alloc_unlock) (h);

  b->as_u64 = tmp_b.as_u64;

  /* Pick the home page; a linear-search bucket is scanned from the top */
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (b->linear_search)
    limit <<= b->log2_pages;
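/*
 * Example (assuming limit starts at BIHASH_KVP_PER_PAGE, say 4): a bucket
 * with log2_pages == 2 that has been marked linear_search is scanned as
 * one flat array of 4 << 2 = 16 slots, whereas a non-linear bucket keeps
 * limit at 4 and selects a single home page by hash.
 */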
  /* For is_add: if the key is already present, update the value in place */
  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))

  BV (clib_bihash_unlock_bucket) (b);

  /* Otherwise, grab the first free slot on the home page */
  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_is_free) (&(v->kvp[i])))

  /* Copy the value first, so a reader that matches the new key sees it */
  clib_memcpy_fast (&(v->kvp[i].value),
                    &add_v->value, sizeof (add_v->value));
  CLIB_MEMORY_BARRIER ();       /* Make sure the value has settled */
  clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
                    sizeof (add_v->key));

  BV (clib_bihash_unlock_bucket) (b);

  /* is_add with a stale callback: reuse a stale entry if the app says so */
  for (i = 0; i < limit; i++)

  if (is_stale_cb (&(v->kvp[i]), arg))

  BV (clib_bihash_unlock_bucket) (b);

  /* Delete case: find the key, mark the slot free by filling it with 0xff */
  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))

  clib_memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));

  BV (clib_bihash_unlock_bucket) (b);

  /* Last entry in the bucket: save the old bucket so its pages can be freed */
  tmp_b.as_u64 = b->as_u64;

  BV (clib_bihash_alloc_lock) (h);
  BV (value_free) (h, v, tmp_b.log2_pages);
  BV (clib_bihash_alloc_unlock) (h);

  /* Key not found */
  BV (clib_bihash_unlock_bucket) (b);

  /* Out of space in this bucket: split and rehash under the allocator lock */
  BV (clib_bihash_alloc_lock) (h);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;

  working_copy = h->working_copies[thread_index];

  /* If the re-split also fails, fall back to a linear-search page array */
  mark_bucket_linear = 1;

  /* Add the new entry to the freshly rehashed pages */
  new_hash = BV (clib_bihash_hash) (add_v);

  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v += mark_bucket_linear ? 0 : new_hash;

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))

  /* Still no room: free the new pages and expand again */
  BV (value_free) (h, save_new_v, new_log2_pages);

  /* Install the new bucket descriptor */
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.linear_search = mark_bucket_linear;
  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);

  b->as_u64 = tmp_b.as_u64;

  /* Free the old page set and release the allocator */
  BV (value_free) (h, v, h->saved_bucket.log2_pages);
  BV (clib_bihash_alloc_unlock) (h);
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}
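/*
 * Usage sketch for the generated add/delete entry point, again assuming
 * the 8_8 template instance; the key and value below are arbitrary and the
 * demo_* name is hypothetical.
 */
#include <vppinfra/bihash_8_8.h>

static void
demo_add_and_delete (clib_bihash_8_8_t * t)
{
  clib_bihash_kv_8_8_t kv;

  kv.key = 0x1234;
  kv.value = 0x5678;
  clib_bihash_add_del_8_8 (t, &kv, 1 /* is_add */ );

  /* Deleting only needs the key to be filled in */
  clib_bihash_add_del_8_8 (t, &kv, 0 /* is_add */ );
}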
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
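/*
 * Sketch of a stale-entry callback for the add_or_overwrite_stale path,
 * assuming the 8_8 instance and an application that stores an expiry time
 * in the value; the demo_* names and the expiry convention are
 * hypothetical.
 */
#include <vppinfra/bihash_8_8.h>

static int
demo_value_is_stale (clib_bihash_kv_8_8_t * kv, void *arg)
{
  u64 now = *(u64 *) arg;

  /* Non-zero return tells the add path it may overwrite this slot */
  return kv->value < now;
}

static void
demo_add_with_stale_reuse (clib_bihash_8_8_t * t, clib_bihash_kv_8_8_t * kv,
                           u64 now)
{
  clib_bihash_add_or_overwrite_stale_8_8 (t, kv, demo_value_is_stale, &now);
}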
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  BVT (clib_bihash_bucket) * b;

  hash = BV (clib_bihash_hash) (search_key);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  if (BV (clib_bihash_bucket_is_empty) (b))

  /* If the bucket is locked by a writer, spin on a volatile view of it */
  volatile BVT (clib_bihash_bucket) * bv = b;

  hash >>= h->log2_nbuckets;

  /* Home page, unless the bucket has gone linear */
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;

  /* Linear-search buckets scan every page */
  limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
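/*
 * Usage sketch for the generated search entry point (8_8 instance assumed;
 * the demo_* name is hypothetical).
 */
#include <vppinfra/bihash_8_8.h>

static u64
demo_lookup (clib_bihash_8_8_t * t, u64 key)
{
  clib_bihash_kv_8_8_t kv, result;

  kv.key = key;
  if (clib_bihash_search_8_8 (t, &kv, &result) < 0)
    return ~0ULL;               /* not found */
  return result.value;
}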
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

  for (i = 0; i < h->nbuckets; i++)

  if (BV (clib_bihash_bucket_is_empty) (b))
    s = format (s, "[%d]: empty\n", i);

  if (b->linear_search)
    linear_buckets++;

  s = format (s, "[%d]: heap offset %lld, len %d, linear %d\n", i,
              b->offset, (1 << b->log2_pages), b->linear_search);

  for (j = 0; j < (1 << b->log2_pages); j++)

  if (BV (clib_bihash_is_free) (&v->kvp[k]))
    s = format (s, " %d: empty\n", j * BIHASH_KVP_PER_PAGE + k);

  /* Use the application-supplied key/value formatter when one is set */
  if (h->fmt_fn)
    s = format (s, " %d: %U\n",
                j * BIHASH_KVP_PER_PAGE + k, h->fmt_fn, &(v->kvp[k]));
  else
    s = format (s, " %d: %U\n",
                j * BIHASH_KVP_PER_PAGE + k,
                BV (format_bihash_kvp), &(v->kvp[k]));

  s = format (s, " %lld active elements %lld active buckets\n",
              active_elements, active_buckets);

  for (i = 0; i < vec_len (h->freelists); i++)

  u64 free_elt_as_u64 = h->freelists[i];

  while (free_elt_as_u64)

  free_elt_as_u64 = free_elt->next_free_as_u64;

  if (nfree || verbose)
    s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);

  s = format (s, " %lld linear search buckets\n", linear_buckets);
  used_bytes = alloc_arena_next (h);
  s = format (s,
              " arena: base %llx, next %llx\n"
              "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
              alloc_arena (h), alloc_arena_next (h),
              used_bytes, used_bytes >> 20,
              alloc_arena_size (h), alloc_arena_size (h) >> 20);
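/*
 * Usage sketch: format_bihash is a standard VPP %U formatter that takes
 * the table pointer and a verbosity level (8_8 instance assumed; the
 * demo_* name is hypothetical).
 */
#include <stdio.h>
#include <vppinfra/bihash_8_8.h>
#include <vppinfra/format.h>

static void
demo_show_table (clib_bihash_8_8_t * t)
{
  fformat (stdout, "%U", format_bihash_8_8, t, 1 /* verbose */ );
}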
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h, void *callback, void *arg)
{
  BVT (clib_bihash_bucket) * b;
  void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;

  for (i = 0; i < h->nbuckets; i++)

  if (BV (clib_bihash_bucket_is_empty) (b))

  for (j = 0; j < (1 << b->log2_pages); j++)

  if (BV (clib_bihash_is_free) (&v->kvp[k]))

  (*fp) (&v->kvp[k], arg);

  /* The callback may have deleted the last entry in this bucket */
  if (BV (clib_bihash_bucket_is_empty) (b))
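/*
 * Usage sketch for the foreach walker (8_8 instance assumed; demo_* names
 * are hypothetical): count the active entries via a callback.
 */
#include <vppinfra/bihash_8_8.h>

static void
demo_count_cb (clib_bihash_kv_8_8_t * kv, void *arg)
{
  (*(u64 *) arg) += 1;
}

static u64
demo_count_entries (clib_bihash_8_8_t * t)
{
  u64 count = 0;

  clib_bihash_foreach_key_value_pair_8_8 (t, (void *) demo_count_cb, &count);
  return count;
}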