/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
#define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
#endif

#ifndef BIHASH_USE_HEAP
#define BIHASH_USE_HEAP 1
#endif

static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
{
  uword rv;

  /* Round to an even number of cache lines */
  nbytes = round_pow2 (nbytes, CLIB_CACHE_LINE_BYTES);

  if (BIHASH_USE_HEAP)
    {
      void *rv, *oldheap;
      uword page_sz = sizeof (BVT (clib_bihash_value));
      uword chunk_sz = round_pow2 (page_sz << BIIHASH_MIN_ALLOC_LOG2_PAGES,
                                   CLIB_CACHE_LINE_BYTES);

      BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;

      /* if there is enough space in the current chunk */
      if (chunk && chunk->bytes_left >= nbytes)
        {
          rv = chunk->next_alloc;
          chunk->bytes_left -= nbytes;
          chunk->next_alloc += nbytes;
          return rv;
        }

      /* requested allocation is bigger than chunk size */
      if (nbytes >= chunk_sz)
        {
          oldheap = clib_mem_set_heap (h->heap);
          chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk),
                                          CLIB_CACHE_LINE_BYTES);
          clib_mem_set_heap (oldheap);
          clib_memset_u8 (chunk, 0, sizeof (*chunk));
          chunk->size = nbytes;
          rv = (u8 *) (chunk + 1);
          if (h->chunks)
            {
              /* take 2nd place in the list */
              chunk->next = h->chunks->next;
              chunk->prev = h->chunks;
              h->chunks->next = chunk;
              if (chunk->next)
                chunk->next->prev = chunk;
            }
          else
            h->chunks = chunk;

          return rv;
        }

      oldheap = clib_mem_set_heap (h->heap);
      chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk),
                                      CLIB_CACHE_LINE_BYTES);
      clib_mem_set_heap (oldheap);
      chunk->size = chunk_sz;
      chunk->bytes_left = chunk_sz;
      chunk->next_alloc = (u8 *) (chunk + 1);
      chunk->next = h->chunks;
      chunk->prev = 0;
      if (chunk->next)
        chunk->next->prev = chunk;
      h->chunks = chunk;
      rv = chunk->next_alloc;
      chunk->bytes_left -= nbytes;
      chunk->next_alloc += nbytes;
      return rv;
    }

  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  if (alloc_arena_next (h) > alloc_arena_mapped (h))
    {
      void *base, *rv;
      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
                             BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);

      /* grow the mapping by at least 25% of what is already mapped */
      if (alloc_arena_mapped (h) >> 2 > alloc)
        alloc = alloc_arena_mapped (h) >> 2;

      /* round allocation to page size */
      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);

      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));

      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);

      /* fallback - maybe we are still able to allocate normal pages */
      if (rv == MAP_FAILED || mlock (base, alloc) != 0)
        rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

      if (rv == MAP_FAILED)
        os_out_of_memory ();

      alloc_arena_mapped (h) += alloc;
    }

  return (void *) (uword) (rv + alloc_arena (h));
}

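/*
 * Instantiate the table's backing store: either point at the current heap
 * (BIHASH_USE_HEAP) or reserve the configured virtual arena, then allocate
 * and initialize the bucket array (plus the bucket-level KVP pages when
 * BIHASH_KVP_AT_BUCKET_LEVEL is set).
 */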
static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
{
  uword bucket_size;

  if (BIHASH_USE_HEAP)
    {
      h->heap = clib_mem_get_heap ();
      h->chunks = 0;
      alloc_arena (h) = (uword) clib_mem_get_heap_base (h->heap);
    }
  else
    {
      alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
                                             BIHASH_LOG2_HUGEPAGE_SIZE);
      if (alloc_arena (h) == ~0)
        os_out_of_memory ();
      alloc_arena_next (h) = 0;
      alloc_arena_size (h) = h->memory_size;
      alloc_arena_mapped (h) = 0;
    }

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    bucket_size +=
      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));

  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    {
      int i, j;
      BVT (clib_bihash_bucket) * b;

      b = h->buckets;

      for (i = 0; i < h->nbuckets; i++)
        {
          BVT (clib_bihash_kv) * v;
          b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
          b->refcnt = 1;
          /* Mark all elements free */
          v = (void *) (b + 1);
          for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
            {
              BV (clib_bihash_mark_free) (v);
              v++;
            }
          /* Compute next bucket start address */
          b = (void *) (((uword) b) + sizeof (*b) +
                        (BIHASH_KVP_PER_PAGE *
                         sizeof (BVT (clib_bihash_kv))));
        }
    }
  CLIB_MEMORY_STORE_BARRIER ();
  h->instantiated = 1;
}

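/*
 * Initialize a bihash table. clib_bihash_init2 takes an argument block;
 * clib_bihash_init is the classic (name, nbuckets, memory_size) wrapper.
 * Unless instantiate_immediately is set (or BIHASH_LAZY_INSTANTIATE is
 * disabled), backing storage is only allocated on the first add.
 */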
void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
{
  int i;
  void *oldheap;
  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = BIHASH_USE_HEAP ? 0 : a->memory_size;
  h->instantiated = 0;
  h->dont_add_to_all_bihash_list = a->dont_add_to_all_bihash_list;
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = a->kvp_fmt_fn;

  alloc_arena (h) = 0;

  /*
   * Make sure the requested size is rational. The max table
   * size without playing the alignment card is 64 Gbytes.
   * If someone starts complaining that's not enough, we can shift
   * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
   */
  if (BIHASH_USE_HEAP)
    ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  /* Add this hash table to the list */
  if (a->dont_add_to_all_bihash_list == 0)
    {
      for (i = 0; i < vec_len (clib_all_bihashes); i++)
        if (clib_all_bihashes[i] == h)
          goto do_lock;
      oldheap = clib_all_bihash_set_heap ();
      vec_add1 (clib_all_bihashes, (void *) h);
      clib_mem_set_heap (oldheap);
    }

do_lock:
  if (h->alloc_lock)
    clib_mem_free ((void *) h->alloc_lock);

  /*
   * Set up the lock now, so we can use it to make the first add
   * thread-safe
   */
  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                          CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

#if BIHASH_LAZY_INSTANTIATE
  if (a->instantiate_immediately)
#endif
    BV (clib_bihash_instantiate) (h);
}

void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));

  a->h = h;
  a->name = name;
  a->nbuckets = nbuckets;
  a->memory_size = memory_size;

  BV (clib_bihash_init2) (a);
}

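/*
 * Illustrative usage sketch (not part of the template itself): assuming the
 * standard 8_8 instantiation from bihash_8_8.h, a caller typically does
 * something like the following; the concrete names (clib_bihash_8_8_t,
 * clib_bihash_init_8_8, ...) come from that wrapper header.
 *
 *   clib_bihash_8_8_t t;
 *   clib_bihash_kv_8_8_t kv;
 *
 *   clib_bihash_init_8_8 (&t, "demo", 2048 (nbuckets), 32 << 20 (bytes));
 *   kv.key = 0x1234;
 *   kv.value = 0x5678;
 *   clib_bihash_add_del_8_8 (&t, &kv, 1 (is_add));
 *   if (clib_bihash_search_8_8 (&t, &kv, &kv) == 0)
 *     ... kv.value now holds 0x5678 ...
 *   clib_bihash_free_8_8 (&t);
 */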
#if BIHASH_32_64_SVM
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

void BV (clib_bihash_initiator_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  uword bucket_size;
  u8 *mmap_addr;
  vec_header_t *freelist_vh;
  int fd;

  ASSERT (BIHASH_USE_HEAP == 0);

  ASSERT (memory_size < (1ULL << 32));
  /* Set up for memfd sharing */
  if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, name)) == -1)
    {
      clib_unix_warning ("memfd_create");
      return;
    }

  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  /* Not mission-critical, complain and continue */
  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
    clib_unix_warning ("fcntl (F_ADD_SEALS)");

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;
  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
  alloc_arena_size (h) = memory_size;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);
  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  freelist_vh =
    BV (alloc_aligned) (h,
                        sizeof (vec_header_t) +
                        BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
  h->instantiated = 1;
}

void BV (clib_bihash_responder_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  u8 *mmap_addr;
  u64 memory_size;
  BVT (clib_bihash_shared_header) * sh;

  ASSERT (BIHASH_USE_HEAP == 0);

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("trial mmap failed");
      ASSERT (0);
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  (void) close (fd);

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;
  h->memfd = -1;

  h->name = (u8 *) name;
  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);

  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
}
#endif /* BIHASH_32_64_SVM */

void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
                                         format_function_t * kvp_fmt_fn)
{
  h->kvp_fmt_fn = kvp_fmt_fn;
}

int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h)
{
  return (h->instantiated != 0);
}

void BV (clib_bihash_free) (BVT (clib_bihash) * h)
{
  int i;

  if (PREDICT_FALSE (h->instantiated == 0))
    goto never_initialized;

  h->instantiated = 0;

  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * next, *chunk;
      void *oldheap = clib_mem_set_heap (h->heap);

      chunk = h->chunks;
      while (chunk)
        {
          next = chunk->next;
          clib_mem_free (chunk);
          chunk = next;
        }
      clib_mem_set_heap (oldheap);
    }

  vec_free (h->working_copies);
  vec_free (h->working_copy_lengths);
  clib_mem_free ((void *) h->alloc_lock);
#if BIHASH_32_64_SVM == 0
  vec_free (h->freelists);
#else
  if (h->memfd > 0)
    (void) close (h->memfd);
#endif
  if (BIHASH_USE_HEAP == 0)
    clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
                      alloc_arena_size (h));
never_initialized:
  if (h->dont_add_to_all_bihash_list)
    {
      clib_memset_u8 (h, 0, sizeof (*h));
      return;
    }
  clib_memset_u8 (h, 0, sizeof (*h));
  for (i = 0; i < vec_len (clib_all_bihashes); i++)
    {
      if ((void *) h == clib_all_bihashes[i])
        {
          vec_delete (clib_all_bihashes, 1, i);
          return;
        }
    }
  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
                (u64) (uword) h);
}

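/*
 * Value-page allocator: pages holding 2^log2_pages KVP groups are kept on
 * per-size freelists. value_alloc pops from the freelist (or carves a new
 * set of pages) and marks every slot free; value_free returns the pages,
 * handing chunk-sized allocations straight back to the heap when
 * BIHASH_USE_HEAP is set.
 */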
static
BVT (clib_bihash_value) *
BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
{
  int i;
  BVT (clib_bihash_value) * rv = 0;

  ASSERT (h->alloc_lock[0]);

#if BIHASH_32_64_SVM
  ASSERT (log2_pages < vec_len (h->freelists));
#endif

  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
    {
      vec_validate_init_empty (h->freelists, log2_pages, 0);
      rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
      goto initialize;
    }
  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
  h->freelists[log2_pages] = rv->next_free_as_u64;

initialize:
  ASSERT (rv);

  BVT (clib_bihash_kv) * v;
  v = (BVT (clib_bihash_kv) *) rv;

  for (i = 0; i < BIHASH_KVP_PER_PAGE * (1 << log2_pages); i++)
    {
      BV (clib_bihash_mark_free) (v);
      v++;
    }
  return rv;
}

static void
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
                 u32 log2_pages)
{
  ASSERT (h->alloc_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  if (BIHASH_USE_HEAP && log2_pages >= BIIHASH_MIN_ALLOC_LOG2_PAGES)
    {
      /* allocations bigger or equal to chunk size always contain single
       * alloc and they can be given back to heap */
      void *oldheap;
      BVT (clib_bihash_alloc_chunk) * c;
      c = (BVT (clib_bihash_alloc_chunk) *) v - 1;

      if (c->prev)
        c->prev->next = c->next;
      else
        h->chunks = c->next;

      if (c->next)
        c->next->prev = c->prev;

      oldheap = clib_mem_set_heap (h->heap);
      clib_mem_free (c);
      clib_mem_set_heap (oldheap);
      return;
    }

  if (CLIB_DEBUG > 0)
    clib_memset_u8 (v, 0xFE, sizeof (*v) * (1 << log2_pages));

  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
}

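/*
 * Clone a bucket's value pages into this thread's working copy and point
 * the live bucket at the clone. Readers never take the bucket lock, so
 * updates are staged in the copy and swapped in with a single bucket write.
 */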
static inline void
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      vec_validate (h->working_copies, thread_index);
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
    }

  /*
   * working_copies are per-cpu so that near-simultaneous
   * updates from multiple threads will not result in sporadic, spurious
   * lookup failures.
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      /*
       * It's not worth the bookkeeping to free working copies
       *   if (working_copy)
       *     clib_mem_free (working_copy);
       */
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
                                       1ULL << b->log2_pages);
    }

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
}

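/*
 * Grow a bucket: rehash the old KVPs into a value array of 2^new_log2_pages
 * pages. The plain variant re-hashes each key onto its new home page and
 * returns 0 on pinned collisions; the _linear variant packs entries
 * sequentially for buckets that fall back to linear search.
 */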
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
        continue;

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
        {
          /* Empty slot */
          if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
            {
              clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_v->kvp[j]));
              goto doublebreak;
            }
        }
      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }

  return new_values;
}

static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  j = 0;
  /* Across the old value array */
  for (i = 0; i < old_length; i++)
    {
      /* Find a free slot in the new linear scan bucket */
      for (; j < new_length; j++)
        {
          /* Old value not in use? Forget it. */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
            goto doublebreak;

          /* New value should never be in use */
          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
            {
              /* Copy the old value and move along */
              clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_values->kvp[j]));
              j++;
              goto doublebreak;
            }
        }
      /* This should never happen... */
      clib_warning ("BUG: linear rehash failed!");
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;

    doublebreak:;
    }
  return new_values;
}

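/*
 * Core add/delete path. The fast cases (replace in place, fill an empty or
 * stale slot, delete with refcount bookkeeping) run under the per-bucket
 * lock only. When a bucket overflows, readers are switched to a working
 * copy, the bucket is split (doubling once, then falling back to linear
 * search), and the rebuilt value pages are swapped in atomically.
 */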
static_always_inline int BV (clib_bihash_add_del_inline_with_hash) (
  BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *is_stale_arg,
  void (*overwrite_cb) (BVT (clib_bihash_kv) *, void *), void *overwrite_arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  int i, limit;
  u64 new_hash;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;
  int resplit_once;

  /* *INDENT-OFF* */
  static const BVT (clib_bihash_bucket) mask = {
    .linear_search = 1,
    .log2_pages = -1
  };
  /* *INDENT-ON* */

#if BIHASH_LAZY_INSTANTIATE
  /*
   * Create the table (is_add=1,2), or flunk the request now (is_add=0)
   * Use the alloc_lock to protect the instantiate operation.
   */
  if (PREDICT_FALSE (h->instantiated == 0))
    {
      if (is_add == 0)
        return (-1);

      BV (clib_bihash_alloc_lock) (h);
      if (h->instantiated == 0)
        BV (clib_bihash_instantiate) (h);
      BV (clib_bihash_alloc_unlock) (h);
    }
#else
  /* Debug image: make sure the table has been instantiated */
  ASSERT (h->instantiated != 0);
#endif

  /*
   * Debug image: make sure that an item being added doesn't accidentally
   * look like a free item.
   */
  ASSERT ((is_add && BV (clib_bihash_is_free) (add_v)) == 0);

  b = BV (clib_bihash_get_bucket) (h, hash);

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
        {
          BV (clib_bihash_unlock_bucket) (b);
          return (-1);
        }

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      *v->kvp = *add_v;
      tmp_b.as_u64 = 0;		/* clears bucket lock */
      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
      tmp_b.refcnt = 1;
      CLIB_MEMORY_STORE_BARRIER ();

      b->as_u64 = tmp_b.as_u64;	/* unlocks the bucket */
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

      return (0);
    }

  /* WARNING: we're still looking at the live copy... */
  limit = BIHASH_KVP_PER_PAGE;
  v = BV (clib_bihash_get_value) (h, b->offset);

  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
    {
      if (PREDICT_FALSE (b->linear_search))
        limit <<= b->log2_pages;
      else
        v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
    }

  if (is_add)
    {
      /*
       * Because reader threads are looking at live data,
       * we have to be extra careful. Readers do NOT hold the
       * bucket lock. We need to be SLOWER than a search, past the
       * point where readers CHECK the bucket lock.
       */

      /*
       * For obvious (in hindsight) reasons, see if we're supposed to
       * replace an existing key, then look for an empty slot.
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            continue;
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              /* Add but do not overwrite? */
              if (is_add == 2)
                {
                  BV (clib_bihash_unlock_bucket) (b);
                  return (-2);
                }
              if (overwrite_cb)
                overwrite_cb (&(v->kvp[i]), overwrite_arg);
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
              return (0);
            }
        }
      /*
       * Look for an empty slot. If found, use it
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            {
              /*
               * Copy the value first, so that if a reader manages
               * to match the new key, the value will be right...
               */
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              CLIB_MEMORY_STORE_BARRIER ();	/* Make sure the value has settled */
              clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
                                sizeof (add_v->key));
              b->refcnt++;
              ASSERT (b->refcnt > 0);
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
              return (0);
            }
        }
      /* look for stale data to overwrite */
      if (is_stale_cb)
        {
          for (i = 0; i < limit; i++)
            {
              if (is_stale_cb (&(v->kvp[i]), is_stale_arg))
                {
                  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
                  return (0);
                }
            }
        }
      /* Out of space in this bucket, split the bucket... */
    }
  else				/* delete case */
    {
      for (i = 0; i < limit; i++)
        {
          /* no sense even looking at this one */
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            continue;
          /* Found the key? Kill it... */
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              BV (clib_bihash_mark_free) (&(v->kvp[i]));
              /* Is the bucket empty? */
              if (PREDICT_TRUE (b->refcnt > 1))
                {
                  b->refcnt--;
                  /* Switch back to the bucket-level kvp array? */
                  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
                      && b->log2_pages > 0)
                    {
                      tmp_b.as_u64 = b->as_u64;
                      b->offset = BV (clib_bihash_get_offset)
                        (h, (void *) (b + 1));
                      b->linear_search = 0;
                      b->log2_pages = 0;
                      /* Clean up the bucket-level kvp array */
                      BVT (clib_bihash_kv) *v = (void *) (b + 1);
                      int j;
                      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
                        {
                          BV (clib_bihash_mark_free) (v);
                          v++;
                        }
                      CLIB_MEMORY_STORE_BARRIER ();
                      BV (clib_bihash_unlock_bucket) (b);
                      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                      goto free_backing_store;
                    }

                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                  return (0);
                }
              else		/* yes, free it */
                {
                  /* Save old bucket value, need log2_pages to free it */
                  tmp_b.as_u64 = b->as_u64;

                  /* Kill and unlock the bucket */
                  b->as_u64 = 0;

                free_backing_store:
                  /* And free the backing storage */
                  BV (clib_bihash_alloc_lock) (h);
                  /* Note: v currently points into the middle of the bucket */
                  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
                  BV (value_free) (h, v, tmp_b.log2_pages);
                  BV (clib_bihash_alloc_unlock) (h);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
                                                   1);
                  return (0);
                }
            }
        }
      /* Not found... */
      BV (clib_bihash_unlock_bucket) (b);
      return (-3);
    }

  /* Move readers to a (locked) temp copy of the bucket */
  BV (clib_bihash_alloc_lock) (h);
  BV (make_working_copy) (h, b);

  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);

  working_copy = h->working_copies[thread_index];
  resplit_once = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);

  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                 new_log2_pages);
  if (new_v == 0)
    {
    try_resplit:
      resplit_once = 1;
      new_log2_pages++;
      /* Try re-splitting. If that fails, fall back to linear search */
      new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                     new_log2_pages);
      if (new_v == 0)
        {
        mark_linear:
          new_log2_pages--;
          /* pinned collisions, use linear search */
          new_v =
            BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
                                          new_log2_pages);
          mark_bucket_linear = 1;
          BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
        }
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
                                       old_log2_pages + 1);
    }

  /* Try to add the new entry */
  save_new_v = new_v;
  new_hash = BV (clib_bihash_hash) (add_v);
  limit = BIHASH_KVP_PER_PAGE;
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  else
    new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
        {
          clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
          goto expand_ok;
        }
    }

  /* Crap. Try again */
  BV (value_free) (h, save_new_v, new_log2_pages);
  /*
   * If we've already doubled the size of the bucket once,
   * fall back to linear search now.
   */
  if (resplit_once)
    goto mark_linear;
  else
    goto try_resplit;

expand_ok:
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
  tmp_b.linear_search = mark_bucket_linear;
#if BIHASH_KVP_AT_BUCKET_LEVEL
  /* Compensate for permanent refcount bump at the bucket level */
  if (new_log2_pages > 0)
#endif
    tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);
  tmp_b.lock = 0;
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = tmp_b.as_u64;

#if BIHASH_KVP_AT_BUCKET_LEVEL
  if (h->saved_bucket.log2_pages > 0)
    {
#endif

      /* free the old bucket, except at the bucket level if so configured */
      v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
      BV (value_free) (h, v, h->saved_bucket.log2_pages);

#if BIHASH_KVP_AT_BUCKET_LEVEL
    }
#endif

  BV (clib_bihash_alloc_unlock) (h);
  return (0);
}

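/*
 * Public entry points: thin wrappers that compute the hash and select the
 * stale/overwrite callbacks for the inline worker above.
 */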
static_always_inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
                                                    is_stale_cb, arg, 0, 0);
}

int BV (clib_bihash_add_del_with_hash) (BVT (clib_bihash) * h,
                                        BVT (clib_bihash_kv) * add_v, u64 hash,
                                        int is_add)
{
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add, 0,
                                                    0, 0, 0);
}

int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}

int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}

int BV (clib_bihash_add_with_overwrite_cb) (
  BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
  void (overwrite_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, 1, 0, 0,
                                                    overwrite_cb, arg);
}

int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}

u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table '%s'\n", h->name ? h->name : (u8 *) "(unnamed)");

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return format (s, " empty, uninitialized");
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        {
          if (verbose > 1)
            s = format (s, "[%d]: empty\n", i);
          continue;
        }

      active_buckets++;

      if (b->linear_search)
        linear_buckets++;

      if (verbose)
        {
          s = format
            (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
             b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
        }

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                {
                  if (verbose > 1)
                    s = format (s, " %d: empty\n",
                                j * BIHASH_KVP_PER_PAGE + k);
                  continue;
                }
              if (verbose)
                {
                  if (h->kvp_fmt_fn)
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  h->kvp_fmt_fn, &(v->kvp[k]), verbose);
                    }
                  else
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  BV (format_bihash_kvp), &(v->kvp[k]));
                    }
                }
              active_elements++;
            }
          v++;
        }
    }

  s = format (s, " %lld active elements %lld active buckets\n",
              active_elements, active_buckets);
  s = format (s, " %d free lists\n", vec_len (h->freelists));

  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
        {
          free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
          nfree++;
          free_elt_as_u64 = free_elt->next_free_as_u64;
        }

      if (nfree || verbose)
        s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, " %lld linear search buckets\n", linear_buckets);
  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * c = h->chunks;
      uword bytes_left = 0, total_size = 0, n_chunks = 0;

      while (c)
        {
          bytes_left += c->bytes_left;
          total_size += c->size;
          n_chunks += 1;
          c = c->next;
        }
      s = format (s,
                  " heap: %u chunk(s) allocated\n"
                  " bytes: used %U, scrap %U\n", n_chunks,
                  format_memory_size, total_size,
                  format_memory_size, bytes_left);
    }
  else
    {
      u64 used_bytes = alloc_arena_next (h);
      s = format (s,
                  " arena: base %llx, next %llx\n"
                  " used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
                  alloc_arena (h), alloc_arena_next (h),
                  used_bytes, used_bytes >> 20,
                  alloc_arena_size (h), alloc_arena_size (h) >> 20);
    }
  return s;
}

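/*
 * Walk every occupied KVP and hand it to the callback. Returning
 * BIHASH_WALK_STOP ends the walk early; if the callback deletes the last
 * entry in a bucket, the bucket is re-checked so freed pages are not read.
 */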
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
{
  int i, j, k;
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return;
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        continue;

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                continue;

              if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
                return;
              /*
               * In case the callback deletes the last entry in the bucket...
               */
              if (BV (clib_bihash_bucket_is_empty) (b))
                goto doublebreak;
            }
          v++;
        }
    doublebreak:
      ;
    }
}

/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */