/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
#define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
#endif

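/*
 * Backing-store allocator for buckets and value pages.
 *
 * With BIHASH_USE_HEAP, allocations are carved from cache-line-aligned
 * chunks obtained from the table's clib heap; a chunk normally covers
 * (1 << BIIHASH_MIN_ALLOC_LOG2_PAGES) value pages, and oversized requests
 * get a dedicated chunk of their own.
 *
 * Without BIHASH_USE_HEAP, allocations simply advance a cursor through a
 * reserved virtual arena that is mapped on demand, preferring huge pages
 * and falling back to normal pages.
 */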
static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
{
  uword rv;

  /* Round to an even number of cache lines */
  nbytes = round_pow2 (nbytes, CLIB_CACHE_LINE_BYTES);

  if (BIHASH_USE_HEAP)
    {
      void *rv, *oldheap;
      uword page_sz = sizeof (BVT (clib_bihash_value));
      uword chunk_sz = round_pow2 (page_sz << BIIHASH_MIN_ALLOC_LOG2_PAGES,
                                   CLIB_CACHE_LINE_BYTES);

      BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;

      /* if there is enough space in the current chunk */
      if (chunk && chunk->bytes_left >= nbytes)
        {
          rv = chunk->next_alloc;
          chunk->bytes_left -= nbytes;
          chunk->next_alloc += nbytes;
          return rv;
        }

      /* requested allocation is bigger than chunk size */
      if (nbytes >= chunk_sz)
        {
          oldheap = clib_mem_set_heap (h->heap);
          chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk),
                                          CLIB_CACHE_LINE_BYTES);
          clib_mem_set_heap (oldheap);
          clib_memset_u8 (chunk, 0, sizeof (*chunk));
          chunk->size = nbytes;
          rv = (u8 *) (chunk + 1);
          if (h->chunks)
            {
              /* take 2nd place in the list */
              chunk->next = h->chunks->next;
              chunk->prev = h->chunks;
              h->chunks->next = chunk;
              if (chunk->next)
                chunk->next->prev = chunk;
            }
          else
            h->chunks = chunk;

          return rv;
        }

      oldheap = clib_mem_set_heap (h->heap);
      chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk),
                                      CLIB_CACHE_LINE_BYTES);
      clib_mem_set_heap (oldheap);
      chunk->size = chunk_sz;
      chunk->bytes_left = chunk_sz;
      chunk->next_alloc = (u8 *) (chunk + 1);
      chunk->next = h->chunks;
      chunk->prev = 0;
      if (chunk->next)
        chunk->next->prev = chunk;
      h->chunks = chunk;
      rv = chunk->next_alloc;
      chunk->bytes_left -= nbytes;
      chunk->next_alloc += nbytes;
      return rv;
    }

  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  if (alloc_arena_next (h) > alloc_arena_mapped (h))
    {
      void *base, *rv;
      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
                             BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);

      /* new allocation is 25% of existing one */
      if (alloc_arena_mapped (h) >> 2 > alloc)
        alloc = alloc_arena_mapped (h) >> 2;

      /* round allocation to page size */
      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);

      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));

      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);

      /* fallback - maybe we are still able to allocate normal pages */
      if (rv == MAP_FAILED || mlock (base, alloc) != 0)
        rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

      if (rv == MAP_FAILED)
        os_out_of_memory ();

      alloc_arena_mapped (h) += alloc;
    }

  return (void *) (uword) (rv + alloc_arena (h));
}

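/*
 * Create the backing storage and the bucket array. Called from
 * clib_bihash_init2() when instantiate_immediately is set, or lazily from
 * the first add operation when BIHASH_LAZY_INSTANTIATE is configured.
 */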
static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
{
  uword bucket_size;

  if (BIHASH_USE_HEAP)
    {
      h->heap = clib_mem_get_heap ();
      h->chunks = 0;
      alloc_arena (h) = (uword) clib_mem_get_heap_base (h->heap);
    }
  else
    {
      alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
                                             BIHASH_LOG2_HUGEPAGE_SIZE);
      if (alloc_arena (h) == ~0)
        os_out_of_memory ();
      alloc_arena_next (h) = 0;
      alloc_arena_size (h) = h->memory_size;
      alloc_arena_mapped (h) = 0;
    }

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    bucket_size +=
      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));

  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    {
      int i;
      BVT (clib_bihash_bucket) * b;

      b = h->buckets;

      for (i = 0; i < h->nbuckets; i++)
        {
          b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
          b->refcnt = 1;
          /* Mark all elements free */
          clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
                          sizeof (BVT (clib_bihash_kv)));

          /* Compute next bucket start address */
          b = (void *) (((uword) b) + sizeof (*b) +
                        (BIHASH_KVP_PER_PAGE *
                         sizeof (BVT (clib_bihash_kv))));
        }
    }
  CLIB_MEMORY_STORE_BARRIER ();
  h->instantiated = 1;
}

void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
{
  int i;
  void *oldheap;
  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = BIHASH_USE_HEAP ? 0 : a->memory_size;
  h->instantiated = 0;
  h->fmt_fn = a->fmt_fn;

  alloc_arena (h) = 0;

  /*
   * Make sure the requested size is rational. The max table
   * size without playing the alignment card is 64 Gbytes.
   * If someone starts complaining that's not enough, we can shift
   * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
   */
  if (BIHASH_USE_HEAP)
    ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  /* Add this hash table to the list */
  if (a->dont_add_to_all_bihash_list == 0)
    {
      for (i = 0; i < vec_len (clib_all_bihashes); i++)
        if (clib_all_bihashes[i] == h)
          goto do_lock;
      oldheap = clib_all_bihash_set_heap ();
      vec_add1 (clib_all_bihashes, (void *) h);
      clib_mem_set_heap (oldheap);
    }

do_lock:
  if (h->alloc_lock)
    clib_mem_free ((void *) h->alloc_lock);

  /*
   * Set up the lock now, so we can use it to make the first add
   * thread-safe
   */
  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                          CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

#if BIHASH_LAZY_INSTANTIATE
  if (a->instantiate_immediately)
#endif
    BV (clib_bihash_instantiate) (h);
}

void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));

  a->h = h;
  a->name = name;
  a->nbuckets = nbuckets;
  a->memory_size = memory_size;

  BV (clib_bihash_init2) (a);
}

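/*
 * Illustrative usage sketch (not compiled here), written against the 8_8
 * flavor of this template; the flavor-specific names come from
 * bihash_8_8.h rather than this file, and process() is a placeholder.
 * Init arguments are table, name, nbuckets, memory_size; the third
 * argument to add_del is is_add:
 *
 *    clib_bihash_8_8_t t;
 *    clib_bihash_kv_8_8_t kv, result;
 *
 *    clib_bihash_init_8_8 (&t, "example", 2048, 32 << 20);
 *    kv.key = 0x1234;
 *    kv.value = 0x5678;
 *    clib_bihash_add_del_8_8 (&t, &kv, 1);
 *    if (clib_bihash_search_8_8 (&t, &kv, &result) == 0)
 *      process (result.value);
 *    clib_bihash_free_8_8 (&t);
 */
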
#if BIHASH_32_64_SVM
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

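/*
 * Shared-memory (memfd) variant: the initiator creates and sizes an
 * anonymous memfd segment, maps it, and publishes bucket / lock / freelist
 * offsets in the shared header; the responder receives the fd by some
 * out-of-band means (typically a unix-domain socket, outside the scope of
 * this file), maps the segment and rebuilds its pointers from those
 * offsets.
 */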
void BV (clib_bihash_initiator_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  uword bucket_size;
  u8 *mmap_addr;
  vec_header_t *freelist_vh;
  int fd;

  ASSERT (BIHASH_USE_HEAP == 0);

  ASSERT (memory_size < (1ULL << 32));
  /* Set up for memfd sharing */
  if ((fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
    {
      clib_unix_warning ("memfd_create");
      return;
    }

  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  /* Not mission-critical, complain and continue */
  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
    clib_unix_warning ("fcntl (F_ADD_SEALS)");

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;
  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
  alloc_arena_size (h) = memory_size;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);
  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  freelist_vh =
    BV (alloc_aligned) (h,
                        sizeof (vec_header_t) +
                        BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = NULL;
  h->instantiated = 1;
}

void BV (clib_bihash_responder_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  u8 *mmap_addr;
  u64 memory_size;
  BVT (clib_bihash_shared_header) * sh;

  ASSERT (BIHASH_USE_HEAP == 0);

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("trial mmap failed");
      ASSERT (0);
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  (void) close (fd);

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;
  h->memfd = -1;

  h->name = (u8 *) name;
  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);

  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
  h->fmt_fn = NULL;
}
#endif /* BIHASH_32_64_SVM */

void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
                                         format_function_t * fmt_fn)
{
  h->fmt_fn = fmt_fn;
}

void BV (clib_bihash_free) (BVT (clib_bihash) * h)
{
  int i;

  if (PREDICT_FALSE (h->instantiated == 0))
    goto never_initialized;

  h->instantiated = 0;

  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * next, *chunk;
      void *oldheap = clib_mem_set_heap (h->heap);

      chunk = h->chunks;
      while (chunk)
        {
          next = chunk->next;
          clib_mem_free (chunk);
          chunk = next;
        }
      clib_mem_set_heap (oldheap);
    }

  vec_free (h->working_copies);
  vec_free (h->working_copy_lengths);
#if BIHASH_32_64_SVM == 0
  vec_free (h->freelists);
#else
  if (h->memfd > 0)
    (void) close (h->memfd);
#endif
  if (BIHASH_USE_HEAP == 0)
    clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
                      alloc_arena_size (h));
never_initialized:
  clib_memset_u8 (h, 0, sizeof (*h));
  for (i = 0; i < vec_len (clib_all_bihashes); i++)
    {
      if ((void *) h == clib_all_bihashes[i])
        {
          vec_delete (clib_all_bihashes, 1, i);
          return;
        }
    }
  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
                (u64) (uword) h);
}

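/*
 * Value-page allocation. Free pages are kept on per-size free lists
 * indexed by log2_pages: value_alloc() pops from the matching list or
 * carves a fresh allocation, value_free() pushes pages back, except that
 * heap-mode allocations at or above the chunk size sit in a dedicated
 * chunk and are returned straight to the heap.
 */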
static
BVT (clib_bihash_value) *
BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
{
  BVT (clib_bihash_value) * rv = 0;

  ASSERT (h->alloc_lock[0]);

#if BIHASH_32_64_SVM
  ASSERT (log2_pages < vec_len (h->freelists));
#endif

  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
    {
      vec_validate_init_empty (h->freelists, log2_pages, 0);
      rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
      goto initialize;
    }
  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
  h->freelists[log2_pages] = rv->next_free_as_u64;

initialize:
  ASSERT (rv);
  /*
   * Latest gcc complains that the length arg is zero
   * if we replace (1<<log2_pages) with vec_len(rv).
   * No clue.
   */
  clib_memset_u8 (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
  return rv;
}

static void
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
                 u32 log2_pages)
{
  ASSERT (h->alloc_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  if (BIHASH_USE_HEAP && log2_pages >= BIIHASH_MIN_ALLOC_LOG2_PAGES)
    {
      /* Allocations at or above the chunk size occupy a dedicated
       * chunk, so they can be handed straight back to the heap */
      void *oldheap;
      BVT (clib_bihash_alloc_chunk) * c;
      c = (BVT (clib_bihash_alloc_chunk) *) v - 1;

      if (c->prev)
        c->prev->next = c->next;
      else
        h->chunks = c->next;

      if (c->next)
        c->next->prev = c->prev;

      oldheap = clib_mem_set_heap (h->heap);
      clib_mem_free (c);
      clib_mem_set_heap (oldheap);
      return;
    }

  if (CLIB_DEBUG > 0)
    clib_memset_u8 (v, 0xFE, sizeof (*v) * (1 << log2_pages));

  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
}

static inline void
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      vec_validate (h->working_copies, thread_index);
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
    }

  /*
   * working_copies are per-cpu so that near-simultaneous
   * updates from multiple threads will not result in sporadic, spurious
   * lookup failures.
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      /*
       * It's not worth the bookkeeping to free working copies
       *   if (working_copy)
       *     clib_mem_free (working_copy);
       */
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
                                       1ULL << b->log2_pages);
    }

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
}

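/*
 * Bucket-split helpers. split_and_rehash() doubles the number of value
 * pages and rehashes each live KV onto its new home page, returning 0 if
 * a home page overflows; the caller then either re-splits once more or
 * falls back to split_and_rehash_linear(), which packs all KVs into a
 * single linearly-searched bucket.
 */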
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
        continue;

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
        {
          /* Empty slot */
          if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
            {
              clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_v->kvp[j]));
              goto doublebreak;
            }
        }
      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }

  return new_values;
}

static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  j = 0;
  /* Across the old value array */
  for (i = 0; i < old_length; i++)
    {
      /* Find a free slot in the new linear scan bucket */
      for (; j < new_length; j++)
        {
          /* Old value not in use? Forget it. */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
            goto doublebreak;

          /* New value should never be in use */
          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
            {
              /* Copy the old value and move along */
              clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_values->kvp[j]));
              j++;
              goto doublebreak;
            }
        }
      /* This should never happen... */
      clib_warning ("BUG: linear rehash failed!");
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;

    doublebreak:;
    }
  return new_values;
}

static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  int i, limit;
  u64 new_hash;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;
  int resplit_once;

  /* *INDENT-OFF* */
  static const BVT (clib_bihash_bucket) mask = {
    .linear_search = 1,
    .log2_pages = -1
  };
  /* *INDENT-ON* */

#if BIHASH_LAZY_INSTANTIATE
  /*
   * Create the table (is_add=1,2), or flunk the request now (is_add=0)
   * Use the alloc_lock to protect the instantiate operation.
   */
  if (PREDICT_FALSE (h->instantiated == 0))
    {
      if (is_add == 0)
        return (-1);

      BV (clib_bihash_alloc_lock) (h);
      if (h->instantiated == 0)
        BV (clib_bihash_instantiate) (h);
      BV (clib_bihash_alloc_unlock) (h);
    }
#else
  /* Debug image: make sure the table has been instantiated */
  ASSERT (h->instantiated != 0);
#endif

  b = BV (clib_bihash_get_bucket) (h, hash);

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
        {
          BV (clib_bihash_unlock_bucket) (b);
          return (-1);
        }

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      *v->kvp = *add_v;
      tmp_b.as_u64 = 0;		/* clears bucket lock */
      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
      tmp_b.refcnt = 1;
      CLIB_MEMORY_STORE_BARRIER ();

      b->as_u64 = tmp_b.as_u64;	/* unlocks the bucket */
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

      return (0);
    }

  /* WARNING: we're still looking at the live copy... */
  limit = BIHASH_KVP_PER_PAGE;
  v = BV (clib_bihash_get_value) (h, b->offset);

  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
    {
      if (PREDICT_FALSE (b->linear_search))
        limit <<= b->log2_pages;
      else
        v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
    }

  if (is_add)
    {
      /*
       * Because reader threads are looking at live data,
       * we have to be extra careful. Readers do NOT hold the
       * bucket lock. We need to be SLOWER than a search, past the
       * point where readers CHECK the bucket lock.
       */

      /*
       * For obvious (in hindsight) reasons, see if we're supposed to
       * replace an existing key, then look for an empty slot.
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              /* Add but do not overwrite? */
              if (is_add == 2)
                {
                  BV (clib_bihash_unlock_bucket) (b);
                  return (-2);
                }

              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
              return (0);
            }
        }
      /*
       * Look for an empty slot. If found, use it
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            {
              /*
               * Copy the value first, so that if a reader manages
               * to match the new key, the value will be right...
               */
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              CLIB_MEMORY_STORE_BARRIER ();	/* Make sure the value has settled */
              clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
                                sizeof (add_v->key));
              b->refcnt++;
              ASSERT (b->refcnt > 0);
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
              return (0);
            }
        }
      /* look for stale data to overwrite */
      if (is_stale_cb)
        {
          for (i = 0; i < limit; i++)
            {
              if (is_stale_cb (&(v->kvp[i]), arg))
                {
                  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
                  return (0);
                }
            }
        }
      /* Out of space in this bucket, split the bucket... */
    }
  else				/* delete case */
    {
      for (i = 0; i < limit; i++)
        {
          /* Found the key? Kill it... */
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
              /* Is the bucket empty? */
              if (PREDICT_TRUE (b->refcnt > 1))
                {
                  b->refcnt--;
                  /* Switch back to the bucket-level kvp array? */
                  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
                      && b->log2_pages > 0)
                    {
                      tmp_b.as_u64 = b->as_u64;
                      b->offset = BV (clib_bihash_get_offset)
                        (h, (void *) (b + 1));
                      b->linear_search = 0;
                      b->log2_pages = 0;
                      /* Clean up the bucket-level kvp array */
                      clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
                                      sizeof (BVT (clib_bihash_kv)));
                      CLIB_MEMORY_STORE_BARRIER ();
                      BV (clib_bihash_unlock_bucket) (b);
                      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                      goto free_backing_store;
                    }

                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                  return (0);
                }
              else		/* yes, free it */
                {
                  /* Save old bucket value, need log2_pages to free it */
                  tmp_b.as_u64 = b->as_u64;

                  /* Kill and unlock the bucket */
                  b->as_u64 = 0;

                free_backing_store:
                  /* And free the backing storage */
                  BV (clib_bihash_alloc_lock) (h);
                  /* Note: v currently points into the middle of the bucket */
                  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
                  BV (value_free) (h, v, tmp_b.log2_pages);
                  BV (clib_bihash_alloc_unlock) (h);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
                                                   1);
                  return (0);
                }
            }
        }
      /* Not found... */
      BV (clib_bihash_unlock_bucket) (b);
      return (-3);
    }

Dave Barach508498f2018-07-19 12:11:16 -0400869 /* Move readers to a (locked) temp copy of the bucket */
870 BV (clib_bihash_alloc_lock) (h);
871 BV (make_working_copy) (h, b);
872
873 v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
874
Dave Barachba7ddfe2017-05-17 20:20:50 -0400875 old_log2_pages = h->saved_bucket.log2_pages;
876 new_log2_pages = old_log2_pages + 1;
Dave Barach5e6b9582016-12-12 15:37:29 -0500877 mark_bucket_linear = 0;
Dave Barach2ce28d62019-05-03 12:58:01 -0400878 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
879 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700880
Damjan Marionf55f9b82017-05-10 21:06:28 +0200881 working_copy = h->working_copies[thread_index];
Dave Barach5e6b9582016-12-12 15:37:29 -0500882 resplit_once = 0;
Dave Barach2ce28d62019-05-03 12:58:01 -0400883 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);
Dave Barach5e6b9582016-12-12 15:37:29 -0500884
Dave Barachba7ddfe2017-05-17 20:20:50 -0400885 new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
886 new_log2_pages);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700887 if (new_v == 0)
888 {
Dave Barach5e6b9582016-12-12 15:37:29 -0500889 try_resplit:
890 resplit_once = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700891 new_log2_pages++;
Dave Barach5e6b9582016-12-12 15:37:29 -0500892 /* Try re-splitting. If that fails, fall back to linear search */
Dave Barachba7ddfe2017-05-17 20:20:50 -0400893 new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
894 new_log2_pages);
Dave Barach5e6b9582016-12-12 15:37:29 -0500895 if (new_v == 0)
896 {
897 mark_linear:
898 new_log2_pages--;
899 /* pinned collisions, use linear search */
900 new_v =
Dave Barachba7ddfe2017-05-17 20:20:50 -0400901 BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
902 new_log2_pages);
Dave Barach5e6b9582016-12-12 15:37:29 -0500903 mark_bucket_linear = 1;
Dave Barach2ce28d62019-05-03 12:58:01 -0400904 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
Dave Barach5e6b9582016-12-12 15:37:29 -0500905 }
Dave Barach2ce28d62019-05-03 12:58:01 -0400906 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
907 BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
908 old_log2_pages + 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700909 }
910
911 /* Try to add the new entry */
912 save_new_v = new_v;
Dave Barachc3799992016-08-15 11:12:27 -0400913 new_hash = BV (clib_bihash_hash) (add_v);
Dave Barach5e6b9582016-12-12 15:37:29 -0500914 limit = BIHASH_KVP_PER_PAGE;
915 if (mark_bucket_linear)
916 limit <<= new_log2_pages;
Damjan Marion68e5fd52020-04-23 13:41:47 +0200917 else
918 new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
Dave Barachc3799992016-08-15 11:12:27 -0400919
Dave Barach5e6b9582016-12-12 15:37:29 -0500920 for (i = 0; i < limit; i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700921 {
Dave Barachc3799992016-08-15 11:12:27 -0400922 if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
923 {
Dave Barach178cf492018-11-13 16:34:13 -0500924 clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
Dave Barachc3799992016-08-15 11:12:27 -0400925 goto expand_ok;
926 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700927 }
Dave Barachba7ddfe2017-05-17 20:20:50 -0400928
Ed Warnickecb9cada2015-12-08 15:45:58 -0700929 /* Crap. Try again */
Dave Barachba7ddfe2017-05-17 20:20:50 -0400930 BV (value_free) (h, save_new_v, new_log2_pages);
Dave Barach5e6b9582016-12-12 15:37:29 -0500931 /*
932 * If we've already doubled the size of the bucket once,
933 * fall back to linear search now.
934 */
935 if (resplit_once)
936 goto mark_linear;
937 else
938 goto try_resplit;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700939
Dave Barachc3799992016-08-15 11:12:27 -0400940expand_ok:
Dave Barach5e6b9582016-12-12 15:37:29 -0500941 tmp_b.log2_pages = new_log2_pages;
Dave Barachc3799992016-08-15 11:12:27 -0400942 tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
Dave Barach5e6b9582016-12-12 15:37:29 -0500943 tmp_b.linear_search = mark_bucket_linear;
Dave Barach16e4a4a2020-04-16 12:00:14 -0400944#if BIHASH_KVP_AT_BUCKET_LEVEL
945 /* Compensate for permanent refcount bump at the bucket level */
946 if (new_log2_pages > 0)
947#endif
948 tmp_b.refcnt = h->saved_bucket.refcnt + 1;
Dave Barach9466c452018-08-24 17:21:14 -0400949 ASSERT (tmp_b.refcnt > 0);
Dave Barach508498f2018-07-19 12:11:16 -0400950 tmp_b.lock = 0;
Damjan Marion801ec2a2020-04-21 19:42:30 +0200951 CLIB_MEMORY_STORE_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -0700952 b->as_u64 = tmp_b.as_u64;
Dave Barach16e4a4a2020-04-16 12:00:14 -0400953
954#if BIHASH_KVP_AT_BUCKET_LEVEL
955 if (h->saved_bucket.log2_pages > 0)
956 {
957#endif
958
959 /* free the old bucket, except at the bucket level if so configured */
960 v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
961 BV (value_free) (h, v, h->saved_bucket.log2_pages);
962
963#if BIHASH_KVP_AT_BUCKET_LEVEL
964 }
965#endif
966
967
Dave Barach508498f2018-07-19 12:11:16 -0400968 BV (clib_bihash_alloc_unlock) (h);
969 return (0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700970}
971
static_always_inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
                                                    is_stale_cb, arg);
}

int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}

int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}

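/*
 * Sketch of a stale-entry callback for clib_bihash_add_or_overwrite_stale
 * (8_8 flavor shown; the ten-second expiry rule and the now argument are
 * purely illustrative). The callback returns non-zero when the candidate
 * slot may be recycled:
 *
 *    static int
 *    kv_is_stale (clib_bihash_kv_8_8_t * kv, void *arg)
 *    {
 *      f64 now = *(f64 *) arg;
 *      return ((f64) kv->value + 10.0 < now);
 *    }
 *
 *    clib_bihash_add_or_overwrite_stale_8_8 (&t, &kv, kv_is_stale, &now);
 */
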
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}

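/*
 * format_bihash is a standard %U formatter taking the table pointer and a
 * verbosity level, e.g. (8_8 flavor shown):
 *
 *    fformat (stdout, "%U", format_bihash_8_8, &t, 1);
 */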
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return format (s, "[empty, uninitialized]");
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        {
          if (verbose > 1)
            s = format (s, "[%d]: empty\n", i);
          continue;
        }

      active_buckets++;

      if (b->linear_search)
        linear_buckets++;

      if (verbose)
        {
          s = format
            (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
             b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
        }

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                {
                  if (verbose > 1)
                    s = format (s, " %d: empty\n",
                                j * BIHASH_KVP_PER_PAGE + k);
                  continue;
                }
              if (verbose)
                {
                  if (h->fmt_fn)
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  h->fmt_fn, &(v->kvp[k]), verbose);
                    }
                  else
                    {
                      s = format (s, " %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  BV (format_bihash_kvp), &(v->kvp[k]));
                    }
                }
              active_elements++;
            }
          v++;
        }
    }

  s = format (s, " %lld active elements %lld active buckets\n",
              active_elements, active_buckets);
  s = format (s, " %d free lists\n", vec_len (h->freelists));

  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
        {
          free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
          nfree++;
          free_elt_as_u64 = free_elt->next_free_as_u64;
        }

      if (nfree || verbose)
        s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, " %lld linear search buckets\n", linear_buckets);
  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * c = h->chunks;
      uword bytes_left = 0, total_size = 0, n_chunks = 0;

      while (c)
        {
          bytes_left += c->bytes_left;
          total_size += c->size;
          n_chunks += 1;
          c = c->next;
        }
      s = format (s,
                  " heap: %u chunks allocated\n"
                  " used %UB, scrap %UB\n", n_chunks,
                  format_memory_size, total_size,
                  format_memory_size, bytes_left);
    }
  else
    {
      u64 used_bytes = alloc_arena_next (h);
      s = format (s,
                  " arena: base %llx, next %llx\n"
                  " used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
                  alloc_arena (h), alloc_arena_next (h),
                  used_bytes, used_bytes >> 20,
                  alloc_arena_size (h), alloc_arena_size (h) >> 20);
    }
  return s;
}

void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
{
  int i, j, k;
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return;
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        continue;

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                continue;

              if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
                return;
              /*
               * In case the callback deletes the last entry in the bucket...
               */
              if (BV (clib_bihash_bucket_is_empty) (b))
                goto doublebreak;
            }
          v++;
        }
    doublebreak:
      ;
    }
}
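
/*
 * Sketch of a walk callback (8_8 flavor shown; BIHASH_WALK_CONTINUE is
 * assumed to be defined alongside the BIHASH_WALK_STOP value tested
 * above). Returning BIHASH_WALK_STOP ends the walk early:
 *
 *    static int
 *    count_entries (clib_bihash_kv_8_8_t * kv, void *arg)
 *    {
 *      (*(u64 *) arg) += 1;
 *      return BIHASH_WALK_CONTINUE;
 *    }
 *
 *    u64 n = 0;
 *    clib_bihash_foreach_key_value_pair_8_8 (&t, count_entries, &n);
 */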

/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */