/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
{
  uword rv;

  /* Round to an even number of cache lines */
  nbytes += CLIB_CACHE_LINE_BYTES - 1;
  nbytes &= ~(CLIB_CACHE_LINE_BYTES - 1);

  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  if (alloc_arena_next (h) > alloc_arena_mapped (h))
    {
      void *base, *rv;
      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB |
                             BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);

      /* Grow by at least 25% of the already-mapped arena */
      if (alloc_arena_mapped (h) >> 2 > alloc)
        alloc = alloc_arena_mapped (h) >> 2;

      /* round allocation to page size */
      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);

      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));

      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);

      /* fallback - maybe we are still able to allocate normal pages */
      if (rv == MAP_FAILED)
        rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

      if (rv == MAP_FAILED)
        os_out_of_memory ();

      alloc_arena_mapped (h) += alloc;
    }

  return (void *) (uword) (rv + alloc_arena (h));
}
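
/*
 * Worked example of the rounding above (illustrative; assumes
 * CLIB_CACHE_LINE_BYTES == 64, which is typical but target-dependent):
 *
 *    nbytes = 100
 *    nbytes += 63    ->  163
 *    nbytes &= ~63   ->  128   (two full cache lines)
 *
 * Note that rv is an arena-relative offset: the caller-visible pointer is
 * formed by adding alloc_arena (h), so the same backing memory works even
 * when the arena is mapped at different virtual addresses (as in the
 * shared-memory variants below).
 */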

static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
{
  uword bucket_size;

  alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
                                         BIHASH_LOG2_HUGEPAGE_SIZE);
  if (alloc_arena (h) == ~0)
    os_out_of_memory ();
  alloc_arena_next (h) = 0;
  alloc_arena_size (h) = h->memory_size;
  alloc_arena_mapped (h) = 0;

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    bucket_size +=
      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));

  h->buckets = BV (alloc_aligned) (h, bucket_size);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    {
      int i;
      BVT (clib_bihash_bucket) * b;

      b = h->buckets;

      for (i = 0; i < h->nbuckets; i++)
        {
          b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
          b->refcnt = 1;
          /* Mark all elements free */
          clib_memset ((b + 1), 0xff,
                       BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv)));

          /* Compute next bucket start address */
          b = (void *) (((uword) b) + sizeof (*b) +
                        (BIHASH_KVP_PER_PAGE *
                         sizeof (BVT (clib_bihash_kv))));
        }
    }
  CLIB_MEMORY_STORE_BARRIER ();
  h->instantiated = 1;
}

void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
{
  int i;
  void *oldheap;
  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = a->memory_size;
  h->instantiated = 0;
  h->fmt_fn = a->fmt_fn;

  alloc_arena (h) = 0;

  /*
   * Make sure the requested size is rational. The max table
   * size without playing the alignment card is 64 Gbytes.
   * If someone starts complaining that's not enough, we can shift
   * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
   */
  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  /* Add this hash table to the list */
  if (a->dont_add_to_all_bihash_list == 0)
    {
      for (i = 0; i < vec_len (clib_all_bihashes); i++)
        if (clib_all_bihashes[i] == h)
          goto do_lock;
      oldheap = clib_all_bihash_set_heap ();
      vec_add1 (clib_all_bihashes, (void *) h);
      clib_mem_set_heap (oldheap);
    }

do_lock:
  if (h->alloc_lock)
    clib_mem_free ((void *) h->alloc_lock);

  /*
   * Set up the lock now, so we can use it to make the first add
   * thread-safe
   */
  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                          CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

#if BIHASH_LAZY_INSTANTIATE
  if (a->instantiate_immediately)
#endif
    BV (clib_bihash_instantiate) (h);
}

void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));

  a->h = h;
  a->name = name;
  a->nbuckets = nbuckets;
  a->memory_size = memory_size;

  BV (clib_bihash_init2) (a);
}
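
/*
 * Usage sketch (illustrative, not compiled here). Callers normally include
 * a concrete key/value variant such as bihash_8_8.h, which instantiates
 * this template with an _8_8 suffix; assuming that variant:
 *
 *   clib_bihash_8_8_t t;
 *   clib_bihash_kv_8_8_t kv;
 *
 *   clib_bihash_init_8_8 (&t, "example", 2048, 32 << 20);
 *   kv.key = 42;
 *   kv.value = 99;
 *   clib_bihash_add_del_8_8 (&t, &kv, 1);        add (is_add = 1)
 *   if (clib_bihash_search_8_8 (&t, &kv, &kv) == 0)
 *     ... kv.value now holds the stored value ...
 *   clib_bihash_free_8_8 (&t);
 */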

#if BIHASH_32_64_SVM
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

void BV (clib_bihash_master_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  uword bucket_size;
  u8 *mmap_addr;
  vec_header_t *freelist_vh;
  int fd;

  ASSERT (memory_size < (1ULL << 32));
  /* Set up for memfd sharing */
  if ((fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
    {
      clib_unix_warning ("memfd_create");
      return;
    }

  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  /* Not mission-critical, complain and continue */
  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
    clib_unix_warning ("fcntl (F_ADD_SEALS)");

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;
  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
  alloc_arena_size (h) = memory_size;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  freelist_vh =
    BV (alloc_aligned) (h,
                        sizeof (vec_header_t) +
                        BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = NULL;
  h->instantiated = 1;
}

void BV (clib_bihash_slave_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  u8 *mmap_addr;
  u64 memory_size;
  BVT (clib_bihash_shared_header) * sh;

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("trial mmap failed");
      ASSERT (0);
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  (void) close (fd);

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;
  h->memfd = -1;

  h->name = (u8 *) name;
  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);

  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
  h->fmt_fn = NULL;
}
#endif /* BIHASH_32_64_SVM */

void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
                                         format_function_t * fmt_fn)
{
  h->fmt_fn = fmt_fn;
}

void BV (clib_bihash_free) (BVT (clib_bihash) * h)
{
  int i;

  if (PREDICT_FALSE (h->instantiated == 0))
    goto never_initialized;

  h->instantiated = 0;
  vec_free (h->working_copies);
  vec_free (h->working_copy_lengths);
#if BIHASH_32_64_SVM == 0
  vec_free (h->freelists);
#else
  if (h->memfd > 0)
    (void) close (h->memfd);
#endif
  clib_mem_vm_free ((void *) (uword) (alloc_arena (h)), alloc_arena_size (h));
never_initialized:
  clib_memset (h, 0, sizeof (*h));
  for (i = 0; i < vec_len (clib_all_bihashes); i++)
    {
      if ((void *) h == clib_all_bihashes[i])
        {
          vec_delete (clib_all_bihashes, 1, i);
          return;
        }
    }
  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
                (u64) (uword) h);
}

static
BVT (clib_bihash_value) *
BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
{
  BVT (clib_bihash_value) * rv = 0;

  ASSERT (h->alloc_lock[0]);

#if BIHASH_32_64_SVM
  ASSERT (log2_pages < vec_len (h->freelists));
#endif

  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
    {
      vec_validate_init_empty (h->freelists, log2_pages, 0);
      rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
      goto initialize;
    }
  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
  h->freelists[log2_pages] = rv->next_free_as_u64;

initialize:
  ASSERT (rv);
  /*
   * Latest gcc complains that the length arg is zero
   * if we replace (1<<log2_pages) with vec_len(rv).
   * No clue.
   */
  clib_memset (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
  return rv;
}
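
/*
 * Allocator note: h->freelists[log2_pages] heads a list of free value
 * arrays of exactly (1 << log2_pages) pages, chained through
 * next_free_as_u64. List links are stored as arena offsets rather than
 * pointers, so they remain valid when the arena is mapped at a different
 * virtual address (the shared-memory case).
 */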

static void
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
                 u32 log2_pages)
{
  ASSERT (h->alloc_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  if (CLIB_DEBUG > 0)
    clib_memset (v, 0xFE, sizeof (*v) * (1 << log2_pages));

  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
}

static inline void
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      vec_validate (h->working_copies, thread_index);
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
    }

  /*
   * working_copies are per-cpu so that near-simultaneous
   * updates from multiple threads will not result in sporadic, spurious
   * lookup failures.
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      /*
       * It's not worth the bookkeeping to free working copies
       *   if (working_copy)
       *     clib_mem_free (working_copy);
       */
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
                                       1ULL << b->log2_pages);
    }

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
}

static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
        continue;

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
        {
          /* Empty slot */
          if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
            {
              clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_v->kvp[j]));
              goto doublebreak;
            }
        }
      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }

  return new_values;
}

static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  j = 0;
  /* Across the old value array */
  for (i = 0; i < old_length; i++)
    {
      /* Find a free slot in the new linear scan bucket */
      for (; j < new_length; j++)
        {
          /* Old value not in use? Forget it. */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
            goto doublebreak;

          /* New value should never be in use */
          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
            {
              /* Copy the old value and move along */
              clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_values->kvp[j]));
              j++;
              goto doublebreak;
            }
        }
      /* This should never happen... */
      clib_warning ("BUG: linear rehash failed!");
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;

    doublebreak:;
    }
  return new_values;
}

static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  int i, limit;
  u64 new_hash;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;
  int resplit_once;

  /* *INDENT-OFF* */
  static const BVT (clib_bihash_bucket) mask = {
    .linear_search = 1,
    .log2_pages = -1
  };
  /* *INDENT-ON* */

#if BIHASH_LAZY_INSTANTIATE
  /*
   * Create the table (is_add=1,2), or flunk the request now (is_add=0)
   * Use the alloc_lock to protect the instantiate operation.
   */
  if (PREDICT_FALSE (h->instantiated == 0))
    {
      if (is_add == 0)
        return (-1);

      BV (clib_bihash_alloc_lock) (h);
      if (h->instantiated == 0)
        BV (clib_bihash_instantiate) (h);
      BV (clib_bihash_alloc_unlock) (h);
    }
#endif

  b = BV (clib_bihash_get_bucket) (h, hash);

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
        {
          BV (clib_bihash_unlock_bucket) (b);
          return (-1);
        }

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      *v->kvp = *add_v;
      tmp_b.as_u64 = 0;		/* clears bucket lock */
      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
      tmp_b.refcnt = 1;
      CLIB_MEMORY_STORE_BARRIER ();

      b->as_u64 = tmp_b.as_u64;	/* unlocks the bucket */
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

      return (0);
    }

  /* WARNING: we're still looking at the live copy... */
  limit = BIHASH_KVP_PER_PAGE;
  v = BV (clib_bihash_get_value) (h, b->offset);

  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
    {
      if (PREDICT_FALSE (b->linear_search))
        limit <<= b->log2_pages;
      else
        v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
    }
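
  /*
   * At this point v points at the key's home page: a linear-search bucket
   * scans all (1 << log2_pages) pages, otherwise hash bits
   * [log2_nbuckets .. log2_nbuckets + log2_pages) select a single page.
   * Illustrative example: with log2_nbuckets = 10 and log2_pages = 2,
   * extract_bits () yields hash bits 10..11, i.e. a page index of 0..3.
   */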

  if (is_add)
    {
      /*
       * Because reader threads are looking at live data,
       * we have to be extra careful. Readers do NOT hold the
       * bucket lock. We need to be SLOWER than a search, past the
       * point where readers CHECK the bucket lock.
       */

      /*
       * For obvious (in hindsight) reasons, see if we're supposed to
       * replace an existing key, then look for an empty slot.
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              /* Add but do not overwrite? */
              if (is_add == 2)
                {
                  BV (clib_bihash_unlock_bucket) (b);
                  return (-2);
                }

              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
              return (0);
            }
        }
      /*
       * Look for an empty slot. If found, use it
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            {
              /*
               * Copy the value first, so that if a reader manages
               * to match the new key, the value will be right...
               */
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              CLIB_MEMORY_STORE_BARRIER ();	/* Make sure the value has settled */
              clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
                                sizeof (add_v->key));
              b->refcnt++;
              ASSERT (b->refcnt > 0);
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
              return (0);
            }
        }
      /* look for stale data to overwrite */
      if (is_stale_cb)
        {
          for (i = 0; i < limit; i++)
            {
              if (is_stale_cb (&(v->kvp[i]), arg))
                {
                  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
                  return (0);
                }
            }
        }
      /* Out of space in this bucket, split the bucket... */
    }
  else				/* delete case */
    {
      for (i = 0; i < limit; i++)
        {
          /* Found the key? Kill it... */
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
              /* Is the bucket empty? */
              if (PREDICT_TRUE (b->refcnt > 1))
                {
                  b->refcnt--;
                  /* Switch back to the bucket-level kvp array? */
                  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
                      && b->log2_pages > 0)
                    {
                      tmp_b.as_u64 = b->as_u64;
                      b->offset = BV (clib_bihash_get_offset)
                        (h, (void *) (b + 1));
                      b->linear_search = 0;
                      b->log2_pages = 0;
                      /* Clean up the bucket-level kvp array */
                      clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
                                      sizeof (BVT (clib_bihash_kv)));
                      CLIB_MEMORY_STORE_BARRIER ();
                      BV (clib_bihash_unlock_bucket) (b);
                      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                      goto free_backing_store;
                    }

                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                  return (0);
                }
              else		/* yes, free it */
                {
                  /* Save old bucket value, need log2_pages to free it */
                  tmp_b.as_u64 = b->as_u64;

                  /* Kill and unlock the bucket */
                  b->as_u64 = 0;

                free_backing_store:
                  /* And free the backing storage */
                  BV (clib_bihash_alloc_lock) (h);
                  /* Note: v currently points into the middle of the bucket */
                  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
                  BV (value_free) (h, v, tmp_b.log2_pages);
                  BV (clib_bihash_alloc_unlock) (h);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
                                                   1);
                  return (0);
                }
            }
        }
      /* Not found... */
      BV (clib_bihash_unlock_bucket) (b);
      return (-3);
    }

  /* Move readers to a (locked) temp copy of the bucket */
  BV (clib_bihash_alloc_lock) (h);
  BV (make_working_copy) (h, b);

  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);

  working_copy = h->working_copies[thread_index];
  resplit_once = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);

  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                 new_log2_pages);
  if (new_v == 0)
    {
    try_resplit:
      resplit_once = 1;
      new_log2_pages++;
      /* Try re-splitting. If that fails, fall back to linear search */
      new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                     new_log2_pages);
      if (new_v == 0)
        {
        mark_linear:
          new_log2_pages--;
          /* pinned collisions, use linear search */
          new_v =
            BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
                                          new_log2_pages);
          mark_bucket_linear = 1;
          BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
        }
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
                                       old_log2_pages + 1);
    }

  /* Try to add the new entry */
  save_new_v = new_v;
  new_hash = BV (clib_bihash_hash) (add_v);
  limit = BIHASH_KVP_PER_PAGE;
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  else
    new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
        {
          clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
          goto expand_ok;
        }
    }

  /* Crap. Try again */
  BV (value_free) (h, save_new_v, new_log2_pages);
  /*
   * If we've already doubled the size of the bucket once,
   * fall back to linear search now.
   */
  if (resplit_once)
    goto mark_linear;
  else
    goto try_resplit;

expand_ok:
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
  tmp_b.linear_search = mark_bucket_linear;
#if BIHASH_KVP_AT_BUCKET_LEVEL
  /* Compensate for permanent refcount bump at the bucket level */
  if (new_log2_pages > 0)
#endif
    tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);
  tmp_b.lock = 0;
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = tmp_b.as_u64;

#if BIHASH_KVP_AT_BUCKET_LEVEL
  if (h->saved_bucket.log2_pages > 0)
    {
#endif

      /* free the old bucket, except at the bucket level if so configured */
      v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
      BV (value_free) (h, v, h->saved_bucket.log2_pages);

#if BIHASH_KVP_AT_BUCKET_LEVEL
    }
#endif

  BV (clib_bihash_alloc_unlock) (h);
  return (0);
}

static_always_inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
                                                    is_stale_cb, arg);
}

int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}

int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
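
/*
 * Sketch of a stale-entry callback (illustrative; the helper and the _8_8
 * variant names are assumptions). The callback is tried against each
 * occupied slot only after the replace and free-slot scans fail; returning
 * non-zero lets the new entry overwrite that slot instead of splitting the
 * bucket:
 *
 *   static int
 *   entry_is_expired (clib_bihash_kv_8_8_t * kv, void *arg)
 *   {
 *     f64 *now = arg;
 *     return value_to_expiry_time (kv->value) < *now;
 *   }
 *
 *   clib_bihash_add_or_overwrite_stale_8_8 (&t, &kv, entry_is_expired, &now);
 */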

int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}

u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;
  u64 used_bytes;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (alloc_arena (h) == 0))
    return format (s, "[empty, uninitialized]");
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        {
          if (verbose > 1)
            s = format (s, "[%d]: empty\n", i);
          continue;
        }

      active_buckets++;

      if (b->linear_search)
        linear_buckets++;

      if (verbose)
        {
          s = format
            (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
             b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
        }

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                {
                  if (verbose > 1)
                    s = format (s, " %d: empty\n",
                                j * BIHASH_KVP_PER_PAGE + k);
                  continue;
                }
              if (verbose)
                {
                  if (h->fmt_fn)
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  h->fmt_fn, &(v->kvp[k]), verbose);
                    }
                  else
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  BV (format_bihash_kvp), &(v->kvp[k]));
                    }
                }
              active_elements++;
            }
          v++;
        }
    }

  s = format (s, " %lld active elements %lld active buckets\n",
              active_elements, active_buckets);
  s = format (s, " %d free lists\n", vec_len (h->freelists));

  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
        {
          free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
          nfree++;
          free_elt_as_u64 = free_elt->next_free_as_u64;
        }

      if (nfree || verbose)
        s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, " %lld linear search buckets\n", linear_buckets);
  used_bytes = alloc_arena_next (h);
  s = format (s,
              " arena: base %llx, next %llx\n"
              " used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
              alloc_arena (h), alloc_arena_next (h),
              used_bytes, used_bytes >> 20,
              alloc_arena_size (h), alloc_arena_size (h) >> 20);
  return s;
}

void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
{
  int i, j, k;
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (alloc_arena (h) == 0))
    return;
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        continue;

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                continue;

              if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
                return;
              /*
               * In case the callback deletes the last entry in the bucket...
               */
              if (BV (clib_bihash_bucket_is_empty) (b))
                goto doublebreak;
            }
          v++;
        }
    doublebreak:
      ;
    }
}
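
/*
 * Sketch of a walk callback (illustrative; the _8_8 variant is assumed).
 * Returning BIHASH_WALK_STOP ends the walk early, BIHASH_WALK_CONTINUE
 * keeps going; the loop above tolerates the callback deleting the entry
 * it was handed.
 *
 *   static int
 *   count_entries (clib_bihash_kv_8_8_t * kv, void *arg)
 *   {
 *     u64 *count = arg;
 *     count[0] += 1;
 *     return (BIHASH_WALK_CONTINUE);
 *   }
 *
 *   u64 count = 0;
 *   clib_bihash_foreach_key_value_pair_8_8 (&t, count_entries, &count);
 */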

/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */