/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <vppinfra/bitops.h>
#include <vppinfra/hash.h>
#include <vppinfra/format.h>
#include <vppinfra/mheap.h>
#include <vppinfra/os.h>
#include <vppinfra/time.h>

#ifdef CLIB_UNIX
#include <vppinfra/elf_clib.h>
#endif

static void mheap_get_trace (void *v, uword offset, uword size);
static void mheap_put_trace (void *v, uword offset, uword size);
static int mheap_trace_sort (const void *t1, const void *t2);

always_inline void
mheap_maybe_lock (void *v)
{
  mheap_t *h = mheap_header (v);
  if (v && (h->flags & MHEAP_FLAG_THREAD_SAFE))
    {
      u32 my_cpu = os_get_thread_index ();
      if (h->owner_cpu == my_cpu)
        {
          h->recursion_count++;
          return;
        }

      while (__sync_lock_test_and_set (&h->lock, 1))
        ;

      h->owner_cpu = my_cpu;
      h->recursion_count = 1;
    }
}

always_inline void
mheap_maybe_unlock (void *v)
{
  mheap_t *h = mheap_header (v);
  if (v && h->flags & MHEAP_FLAG_THREAD_SAFE)
    {
      ASSERT (os_get_thread_index () == h->owner_cpu);
      if (--h->recursion_count == 0)
        {
          h->owner_cpu = ~0;
          CLIB_MEMORY_BARRIER ();
          h->lock = 0;
        }
    }
}
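
/* A usage note on the lock above (an editorial sketch, not part of the
   heap API): a heap created with MHEAP_FLAG_THREAD_SAFE is protected by
   this spin lock, and the owner_cpu / recursion_count pair makes it
   recursive, so a thread that already holds the lock (for example
   mheap_get_aligned() calling mheap_validate(), which locks again) does
   not deadlock against itself. */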

/* Find bin for objects with size at least n_user_data_bytes. */
always_inline uword
user_data_size_to_bin_index (uword n_user_data_bytes)
{
  uword n_user_data_words;
  word small_bin, large_bin;

  /* User size must be at least big enough to hold free elt. */
  n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);

  /* Round to words. */
  n_user_data_words =
    (round_pow2 (n_user_data_bytes, MHEAP_USER_DATA_WORD_BYTES) /
     MHEAP_USER_DATA_WORD_BYTES);

  ASSERT (n_user_data_words > 0);
  small_bin =
    n_user_data_words -
    (MHEAP_MIN_USER_DATA_BYTES / MHEAP_USER_DATA_WORD_BYTES);
  ASSERT (small_bin >= 0);

  large_bin =
    MHEAP_N_SMALL_OBJECT_BINS + max_log2 (n_user_data_bytes) -
    MHEAP_LOG2_N_SMALL_OBJECT_BINS;

  return small_bin < MHEAP_N_SMALL_OBJECT_BINS ? small_bin : large_bin;
}
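
/* Worked example for the bin mapping above (the numbers are illustrative;
   the real values come from the MHEAP_* constants in mheap.h): assuming
   8-byte user-data words and a 4-word minimum object, a 100-byte request
   rounds up to 104 bytes = 13 words and lands in small bin 13 - 4 = 9.
   Requests too large for the small bins fall into log2-sized large bins
   instead. */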

always_inline uword
mheap_elt_size_to_user_n_bytes (uword n_bytes)
{
  ASSERT (n_bytes >= sizeof (mheap_elt_t));
  return (n_bytes - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
}

always_inline uword __attribute__ ((unused))
mheap_elt_size_to_user_n_words (uword n_bytes)
{
  ASSERT (n_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);
  return mheap_elt_size_to_user_n_bytes (n_bytes) /
    MHEAP_USER_DATA_WORD_BYTES;
}

always_inline void
mheap_elt_set_size (void *v,
                    uword uoffset, uword n_user_data_bytes, uword is_free)
{
  mheap_elt_t *e, *n;

  e = mheap_elt_at_uoffset (v, uoffset);

  ASSERT (n_user_data_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);

  e->n_user_data = n_user_data_bytes / MHEAP_USER_DATA_WORD_BYTES;
  e->is_free = is_free;
  ASSERT (e->prev_n_user_data * sizeof (e->user_data[0]) >=
          MHEAP_MIN_USER_DATA_BYTES);

  n = mheap_next_elt (e);
  n->prev_n_user_data = e->n_user_data;
  n->prev_is_free = is_free;
}

always_inline void
set_first_free_elt_offset (mheap_t * h, uword bin, uword uoffset)
{
  uword i0, i1;

  h->first_free_elt_uoffset_by_bin[bin] = uoffset;

  i0 = bin / BITS (h->non_empty_free_elt_heads[0]);
  i1 = (uword) 1 << (uword) (bin % BITS (h->non_empty_free_elt_heads[0]));

  ASSERT (i0 < ARRAY_LEN (h->non_empty_free_elt_heads));
  if (h->first_free_elt_uoffset_by_bin[bin] == MHEAP_GROUNDED)
    h->non_empty_free_elt_heads[i0] &= ~i1;
  else
    h->non_empty_free_elt_heads[i0] |= i1;
}
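
/* Example of the bitmap bookkeeping above (bin number is illustrative):
   with uword-sized heads on a 64-bit build, marking bin 70 non-empty sets
   bit 70 % 64 = 6 of non_empty_free_elt_heads[1]; grounding the bin clears
   the same bit, which lets the free-list search skip whole words of empty
   bins at a time. */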

always_inline void
set_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e = mheap_elt_at_uoffset (v, uoffset);
  mheap_elt_t *n = mheap_next_elt (e);
  uword bin = user_data_size_to_bin_index (n_user_data_bytes);

  ASSERT (n->prev_is_free);
  ASSERT (e->is_free);

  e->free_elt.prev_uoffset = MHEAP_GROUNDED;
  e->free_elt.next_uoffset = h->first_free_elt_uoffset_by_bin[bin];

  /* Fill in next free elt's previous pointer. */
  if (e->free_elt.next_uoffset != MHEAP_GROUNDED)
    {
      mheap_elt_t *nf = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
      ASSERT (nf->is_free);
      nf->free_elt.prev_uoffset = uoffset;
    }

  set_first_free_elt_offset (h, bin, uoffset);
}

always_inline void
new_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
  mheap_elt_set_size (v, uoffset, n_user_data_bytes, /* is_free */ 1);
  set_free_elt (v, uoffset, n_user_data_bytes);
}

always_inline void
remove_free_elt (void *v, mheap_elt_t * e, uword bin)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *p, *n;
#if CLIB_VEC64 > 0
  u64 no, po;
#else
  u32 no, po;
#endif

  no = e->free_elt.next_uoffset;

  n = no != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, no) : 0;
  po = e->free_elt.prev_uoffset;
  p = po != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, po) : 0;

  if (!p)
    set_first_free_elt_offset (h, bin, no);
  else
    p->free_elt.next_uoffset = no;

  if (n)
    n->free_elt.prev_uoffset = po;
}

always_inline void
remove_free_elt2 (void *v, mheap_elt_t * e)
{
  uword bin;
  bin = user_data_size_to_bin_index (mheap_elt_data_bytes (e));
  remove_free_elt (v, e, bin);
}

#define MHEAP_VM_MAP (1 << 0)
#define MHEAP_VM_UNMAP (1 << 1)
#define MHEAP_VM_NOMAP (0 << 1)
#define MHEAP_VM_ROUND (1 << 2)
#define MHEAP_VM_ROUND_UP MHEAP_VM_ROUND
#define MHEAP_VM_ROUND_DOWN (0 << 2)

static uword mheap_page_size;

static_always_inline uword
mheap_page_round (uword addr)
{
  return (addr + mheap_page_size - 1) & ~(mheap_page_size - 1);
}

static_always_inline uword
mheap_page_truncate (uword addr)
{
  return addr & ~(mheap_page_size - 1);
}

static_always_inline uword
mheap_vm (void *v, uword flags, clib_address_t start_addr, uword size)
{
  mheap_t *h = mheap_header (v);
  clib_address_t start_page, end_page, end_addr;
  uword mapped_bytes;

  ASSERT (!(h->flags & MHEAP_FLAG_DISABLE_VM));

  end_addr = start_addr + size;

  /* Round start/end address up to page boundary. */
  start_page = mheap_page_round (start_addr);

  if ((flags & MHEAP_VM_ROUND) == MHEAP_VM_ROUND_UP)
    end_page = mheap_page_round (end_addr);
  else
    end_page = mheap_page_truncate (end_addr);

  mapped_bytes = 0;
  if (end_page > start_page)
    {
      mapped_bytes = end_page - start_page;
      if (flags & MHEAP_VM_MAP)
        clib_mem_vm_map ((void *) start_page, end_page - start_page);
      else if (flags & MHEAP_VM_UNMAP)
        clib_mem_vm_unmap ((void *) start_page, end_page - start_page);
    }

  return mapped_bytes;
}
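
/* Illustration of the rounding flags above (addresses are made up): with
   4k pages, mheap_vm (v, MHEAP_VM_UNMAP | MHEAP_VM_ROUND_DOWN, 0x1100,
   0x2100) acts only on the fully covered pages [0x2000, 0x3000); the
   partial pages at either end stay mapped, since they may still hold
   live element headers. */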

static_always_inline uword
mheap_vm_elt (void *v, uword flags, uword offset)
{
  mheap_elt_t *e;
  clib_address_t start_addr, end_addr;

  e = mheap_elt_at_uoffset (v, offset);
  start_addr = (clib_address_t) ((void *) e->user_data);
  end_addr = (clib_address_t) mheap_next_elt (e);
  return mheap_vm (v, flags, start_addr, end_addr - start_addr);
}

always_inline uword
mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin)
{
  uword mask;

/* $$$$ ELIOT FIXME: add Altivec version of this routine */
#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) || defined (__i386__)
  mask = 0;
#else
  u8x16 b = u8x16_splat (bin);

  ASSERT (bin < 256);

#define _(i) ((uword) u8x16_compare_byte_mask ((b == c->bins.as_u8x16[i])) << (uword) ((i)*16))
  mask = _(0) | _(1);
  if (BITS (uword) > 32)
    mask |= _(2) | _(3);
#undef _

#endif
  return mask;
}
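
/* A scalar sketch of what the vector code above computes (assuming one
   bin byte per cache slot, 0 meaning "empty"):

     mask = 0;
     for (i = 0; i < BITS (uword); i++)
       if (c->bins.as_u8[i] == bin)
         mask |= (uword) 1 << i;

   i.e. a bitmask of cache slots whose recorded bin matches the argument. */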

always_inline uword
mheap_get_small_object (mheap_t * h, uword bin)
{
  mheap_small_object_cache_t *c = &h->small_object_cache;
  uword mask = mheap_small_object_cache_mask (c, bin + 1);
  uword offset = MHEAP_GROUNDED;

  if (mask)
    {
      uword i = min_log2 (mask);
      uword o = c->offsets[i];
      ASSERT (o != MHEAP_GROUNDED);
      c->bins.as_u8[i] = 0;
      offset = o;
    }

  return offset;
}

always_inline uword
mheap_put_small_object (mheap_t * h, uword bin, uword offset)
{
  mheap_small_object_cache_t *c = &h->small_object_cache;
  uword free_mask = mheap_small_object_cache_mask (c, 0);
  uword b = bin + 1;
  uword i;

  if (free_mask != 0)
    {
      i = min_log2 (free_mask);
      c->bins.as_u8[i] = b;
      c->offsets[i] = offset;
      return 0;
    }
  else
    /* Nothing free with right size: cyclic replacement. */
    {
      uword old_offset;

      i = c->replacement_index++;
      i %= BITS (uword);
      c->bins.as_u8[i] = b;
      old_offset = c->offsets[i];
      c->offsets[i] = offset;

      /* Return old offset so it can be freed. */
      return old_offset;
    }
}

static uword
mheap_get_search_free_bin (void *v,
                           uword bin,
                           uword * n_user_data_bytes_arg,
                           uword align, uword align_offset)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e;

  /* Free object is at offset f0 ... f1;
     Allocated object is at offset o0 ... o1. */
  word o0, o1, f0, f1, search_n_user_data_bytes;
  word lo_free_usize, hi_free_usize;

  ASSERT (h->first_free_elt_uoffset_by_bin[bin] != MHEAP_GROUNDED);
  e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[bin]);

  search_n_user_data_bytes = *n_user_data_bytes_arg;

  /* Silence compiler warning. */
  o0 = o1 = f0 = f1 = 0;

  h->stats.free_list.n_search_attempts += 1;

  /* Find an object that is large enough with correct alignment at the
     given alignment offset. */
  while (1)
    {
      uword this_object_n_user_data_bytes = mheap_elt_data_bytes (e);

      ASSERT (e->is_free);
      if (bin < MHEAP_N_SMALL_OBJECT_BINS)
        ASSERT (this_object_n_user_data_bytes >= search_n_user_data_bytes);

      h->stats.free_list.n_objects_searched += 1;

      if (this_object_n_user_data_bytes < search_n_user_data_bytes)
        goto next;

      /* Bounds of free object: from f0 to f1. */
      f0 = ((void *) e->user_data - v);
      f1 = f0 + this_object_n_user_data_bytes;

      /* Place candidate object at end of free block and align as requested. */
      o0 = ((f1 - search_n_user_data_bytes) & ~(align - 1)) - align_offset;
      while (o0 < f0)
        o0 += align;

      /* Make sure that first free fragment is either empty or
         large enough to be valid. */
      while (1)
        {
          lo_free_usize = o0 != f0 ? o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES : 0;
          if (o0 <= f0 || lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES)
            break;
          o0 -= align;
        }

      o1 = o0 + search_n_user_data_bytes;

      /* Does it fit? */
      if (o0 >= f0 && o1 <= f1)
        goto found;

    next:
      /* Reached end of free list without finding large enough object. */
      if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
        return MHEAP_GROUNDED;

      /* Otherwise keep searching for large enough object. */
      e = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
    }

found:
  /* Free fragment at end. */
  hi_free_usize = f1 != o1 ? f1 - o1 - MHEAP_ELT_OVERHEAD_BYTES : 0;

  /* If fragment at end is too small to be a new object,
     give user's object a bit more space than requested. */
  if (hi_free_usize < (word) MHEAP_MIN_USER_DATA_BYTES)
    {
      search_n_user_data_bytes += f1 - o1;
      o1 = f1;
      hi_free_usize = 0;
    }

  /* Need to make sure that relevant memory areas are mapped. */
  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    {
      mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
      mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
      mheap_elt_t *o0_elt = mheap_elt_at_uoffset (v, o0);
      mheap_elt_t *o1_elt = mheap_elt_at_uoffset (v, o1);

      uword f0_page_start, f0_page_end;
      uword o0_page_start, o0_page_end;

      /* Free elt is mapped.  Addresses after that may not be mapped. */
      f0_page_start = mheap_page_round (pointer_to_uword (f0_elt->user_data));
      f0_page_end = mheap_page_truncate (pointer_to_uword (f1_elt));

      o0_page_start = mheap_page_truncate (pointer_to_uword (o0_elt));
      o0_page_end = mheap_page_round (pointer_to_uword (o1_elt->user_data));

      if (o0_page_start < f0_page_start)
        o0_page_start = f0_page_start;
      if (o0_page_end > f0_page_end)
        o0_page_end = f0_page_end;

      if (o0_page_end > o0_page_start)
        clib_mem_vm_map (uword_to_pointer (o0_page_start, void *),
                         o0_page_end - o0_page_start);
    }

  /* Remove free object from free list. */
  remove_free_elt (v, e, bin);

  /* Free fragment at beginning. */
  if (lo_free_usize > 0)
    {
      ASSERT (lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES);
      mheap_elt_set_size (v, f0, lo_free_usize, /* is_free */ 1);
      new_free_elt (v, f0, lo_free_usize);
    }

  mheap_elt_set_size (v, o0, search_n_user_data_bytes, /* is_free */ 0);

  if (hi_free_usize > 0)
    {
      uword uo = o1 + MHEAP_ELT_OVERHEAD_BYTES;
      mheap_elt_set_size (v, uo, hi_free_usize, /* is_free */ 1);
      new_free_elt (v, uo, hi_free_usize);
    }

  /* Return actual size of block. */
  *n_user_data_bytes_arg = search_n_user_data_bytes;

  h->stats.free_list.n_objects_found += 1;

  return o0;
}
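
/* Worked example of the placement above (sizes are illustrative): when a
   64-byte object with 64-byte alignment is carved out of a larger free
   block, the candidate is placed as close to the end of the block as the
   alignment allows.  A leading fragment is kept as a new free element only
   if it is big enough to stand alone; a too-small trailing fragment is not
   split off but folded into the object, which is why the possibly enlarged
   size is returned through n_user_data_bytes_arg. */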

/* Search free lists for object with given size and alignment. */
static uword
mheap_get_search_free_list (void *v,
                            uword * n_user_bytes_arg,
                            uword align, uword align_offset)
{
  mheap_t *h = mheap_header (v);
  uword bin, n_user_bytes, i, bi;

  n_user_bytes = *n_user_bytes_arg;
  bin = user_data_size_to_bin_index (n_user_bytes);

  if (MHEAP_HAVE_SMALL_OBJECT_CACHE
      && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE)
      && bin < 255
      && align == STRUCT_SIZE_OF (mheap_elt_t, user_data[0])
      && align_offset == 0)
    {
      uword r = mheap_get_small_object (h, bin);
      h->stats.n_small_object_cache_attempts += 1;
      if (r != MHEAP_GROUNDED)
        {
          h->stats.n_small_object_cache_hits += 1;
          return r;
        }
    }

  for (i = bin / BITS (uword); i < ARRAY_LEN (h->non_empty_free_elt_heads);
       i++)
    {
      uword non_empty_bin_mask = h->non_empty_free_elt_heads[i];

      /* No need to search smaller bins. */
      if (i == bin / BITS (uword))
        non_empty_bin_mask &= ~pow2_mask (bin % BITS (uword));

      /* Search each occupied free bin which is large enough. */
      /* *INDENT-OFF* */
      foreach_set_bit (bi, non_empty_bin_mask,
      ({
        uword r =
          mheap_get_search_free_bin (v, bi + i * BITS (uword),
                                     n_user_bytes_arg,
                                     align,
                                     align_offset);
        if (r != MHEAP_GROUNDED) return r;
      }));
      /* *INDENT-ON* */
    }

  return MHEAP_GROUNDED;
}

static never_inline void *
mheap_get_extend_vector (void *v,
                         uword n_user_data_bytes,
                         uword align,
                         uword align_offset, uword * offset_return)
{
  /* Bounds of free and allocated objects (as above). */
  uword f0, f1, o0, o1;
  word free_size;
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e;

  if (_vec_len (v) == 0)
    {
      _vec_len (v) = MHEAP_ELT_OVERHEAD_BYTES;

      /* Create first element of heap. */
      e = mheap_elt_at_uoffset (v, _vec_len (v));
      e->prev_n_user_data = MHEAP_N_USER_DATA_INVALID;
    }

  f0 = _vec_len (v);

  o0 = round_pow2 (f0, align) - align_offset;
  while (1)
    {
      free_size = o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES;
      if (o0 == f0 || free_size >= (word) sizeof (mheap_elt_t))
        break;

      o0 += align;
    }

  o1 = o0 + n_user_data_bytes;
  f1 = o1 + MHEAP_ELT_OVERHEAD_BYTES;

  ASSERT (v != 0);
  h = mheap_header (v);

  /* Make sure we have space for object plus overhead. */
  if (f1 > h->max_size)
    {
      *offset_return = MHEAP_GROUNDED;
      return v;
    }

  _vec_len (v) = f1;

  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    {
      mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
      mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);

      uword f0_page = mheap_page_round (pointer_to_uword (f0_elt->user_data));
      uword f1_page = mheap_page_round (pointer_to_uword (f1_elt->user_data));

      if (f1_page > f0_page)
        mheap_vm (v, MHEAP_VM_MAP, f0_page, f1_page - f0_page);
    }

  if (free_size > 0)
    new_free_elt (v, f0, free_size);

  mheap_elt_set_size (v, o0, n_user_data_bytes, /* is_free */ 0);

  /* Mark last element. */
  e = mheap_elt_at_uoffset (v, f1);
  e->n_user_data = MHEAP_N_USER_DATA_INVALID;

  *offset_return = o0;

  return v;
}

void *
mheap_get_aligned (void *v,
                   uword n_user_data_bytes,
                   uword align, uword align_offset, uword * offset_return)
{
  mheap_t *h;
  uword offset;
  u64 cpu_times[2];

  cpu_times[0] = clib_cpu_time_now ();

  align = clib_max (align, STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
  align = max_pow2 (align);

  /* Correct align offset to be smaller than alignment. */
  align_offset &= (align - 1);

  /* Align offset must be multiple of minimum object size. */
  if (align_offset % STRUCT_SIZE_OF (mheap_elt_t, user_data[0]) != 0)
    {
      *offset_return = MHEAP_GROUNDED;
      return v;
    }

  /*
   * Round requested size.
   *
   * Step 1: round up to the minimum object size.
   * Step 2: round up to a multiple of the user data size (e.g. 4)
   * Step 3: if non-trivial alignment requested, round up
   *         so that the object precisely fills a chunk
   *         as big as the alignment request.
   *
   * Step 3 prevents the code from going into "bin search hyperspace":
   * looking at a huge number of fractional remainder chunks, none of which
   * will satisfy the alignment constraint.  This fixes an allocator
   * performance issue when one requests a large number of 16 byte objects
   * aligned to 64 bytes, to name one variation on the theme.
   */
  n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
  n_user_data_bytes =
    round_pow2 (n_user_data_bytes,
                STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
  if (align > MHEAP_ELT_OVERHEAD_BYTES)
    n_user_data_bytes = clib_max (n_user_data_bytes,
                                  align - MHEAP_ELT_OVERHEAD_BYTES);
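
  /* Example of step 3 (illustrative numbers): with a 16-byte request and
     64-byte alignment, and assuming MHEAP_ELT_OVERHEAD_BYTES is smaller
     than 64, the request is padded up to 64 - MHEAP_ELT_OVERHEAD_BYTES so
     that object plus overhead exactly tiles one 64-byte chunk and no
     unusable slivers are created. */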
  if (!v)
    v = mheap_alloc (0, 64 << 20);

  mheap_maybe_lock (v);

  h = mheap_header (v);

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  /* First search free lists for object. */
  offset =
    mheap_get_search_free_list (v, &n_user_data_bytes, align, align_offset);

  h = mheap_header (v);

  /* If that fails allocate object at end of heap by extending vector. */
  if (offset == MHEAP_GROUNDED && _vec_len (v) < h->max_size)
    {
      v =
        mheap_get_extend_vector (v, n_user_data_bytes, align, align_offset,
                                 &offset);
      h = mheap_header (v);
      h->stats.n_vector_expands += offset != MHEAP_GROUNDED;
    }

  *offset_return = offset;
  if (offset != MHEAP_GROUNDED)
    {
      h->n_elts += 1;

      if (h->flags & MHEAP_FLAG_TRACE)
        {
          /* Recursion block for case when we are tracing the main clib heap. */
          h->flags &= ~MHEAP_FLAG_TRACE;

          mheap_get_trace (v, offset, n_user_data_bytes);

          h->flags |= MHEAP_FLAG_TRACE;
        }
    }

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  mheap_maybe_unlock (v);

  cpu_times[1] = clib_cpu_time_now ();
  h->stats.n_clocks_get += cpu_times[1] - cpu_times[0];
  h->stats.n_gets += 1;

  return v;
}

static void
free_last_elt (void *v, mheap_elt_t * e)
{
  mheap_t *h = mheap_header (v);

  /* Possibly delete preceding free element also. */
  if (e->prev_is_free)
    {
      e = mheap_prev_elt (e);
      remove_free_elt2 (v, e);
    }

  if (e->prev_n_user_data == MHEAP_N_USER_DATA_INVALID)
    {
      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, mheap_elt_uoffset (v, e));
      _vec_len (v) = 0;
    }
  else
    {
      uword uo = mheap_elt_uoffset (v, e);
      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, uo);
      e->n_user_data = MHEAP_N_USER_DATA_INVALID;
      _vec_len (v) = uo;
    }
}

void
mheap_put (void *v, uword uoffset)
{
  mheap_t *h;
  uword n_user_data_bytes, bin;
  mheap_elt_t *e, *n;
  uword trace_uoffset, trace_n_user_data_bytes;
  u64 cpu_times[2];

  cpu_times[0] = clib_cpu_time_now ();

  h = mheap_header (v);

  mheap_maybe_lock (v);

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  ASSERT (h->n_elts > 0);
  h->n_elts--;
  h->stats.n_puts += 1;

  e = mheap_elt_at_uoffset (v, uoffset);
  n = mheap_next_elt (e);
  n_user_data_bytes = mheap_elt_data_bytes (e);

  trace_uoffset = uoffset;
  trace_n_user_data_bytes = n_user_data_bytes;

  bin = user_data_size_to_bin_index (n_user_data_bytes);
  if (MHEAP_HAVE_SMALL_OBJECT_CACHE
      && bin < 255 && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE))
    {
      uoffset = mheap_put_small_object (h, bin, uoffset);
      if (uoffset == 0)
        goto done;

      e = mheap_elt_at_uoffset (v, uoffset);
      n = mheap_next_elt (e);
      n_user_data_bytes = mheap_elt_data_bytes (e);
    }

  /* Assert that forward and back pointers are equal. */
  if (e->n_user_data != n->prev_n_user_data)
    os_panic ();

  /* Forward and backwards is_free must agree. */
  if (e->is_free != n->prev_is_free)
    os_panic ();

  /* Object was already freed. */
  if (e->is_free)
    os_panic ();

  /* Special case: delete last element in heap. */
  if (n->n_user_data == MHEAP_N_USER_DATA_INVALID)
    free_last_elt (v, e);

  else
    {
      uword f0, f1, n_combine;

      f0 = uoffset;
      f1 = f0 + n_user_data_bytes;
      n_combine = 0;

      if (e->prev_is_free)
        {
          mheap_elt_t *p = mheap_prev_elt (e);
          f0 = mheap_elt_uoffset (v, p);
          remove_free_elt2 (v, p);
          n_combine++;
        }

      if (n->is_free)
        {
          mheap_elt_t *m = mheap_next_elt (n);
          f1 = (void *) m - v;
          remove_free_elt2 (v, n);
          n_combine++;
        }

      if (n_combine)
        mheap_elt_set_size (v, f0, f1 - f0, /* is_free */ 1);
      else
        e->is_free = n->prev_is_free = 1;
      set_free_elt (v, f0, f1 - f0);

      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, f0);
    }

done:
  h = mheap_header (v);

  if (h->flags & MHEAP_FLAG_TRACE)
    {
      /* Recursion block for case when we are tracing the main clib heap. */
      h->flags &= ~MHEAP_FLAG_TRACE;

      mheap_put_trace (v, trace_uoffset, trace_n_user_data_bytes);

      h->flags |= MHEAP_FLAG_TRACE;
    }

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  mheap_maybe_unlock (v);

  cpu_times[1] = clib_cpu_time_now ();
  h->stats.n_clocks_put += cpu_times[1] - cpu_times[0];
}

void *
mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
{
  mheap_t *h;
  void *v;
  uword size;

  if (!mheap_page_size)
    mheap_page_size = clib_mem_get_page_size ();

  if (!memory)
    {
      /* No memory given, try to VM allocate some. */
      memory = clib_mem_vm_alloc (memory_size);
      if (!memory)
        return 0;

      /* No memory region implies we have virtual memory. */
      flags &= ~MHEAP_FLAG_DISABLE_VM;
    }

  /* Make sure that given memory is page aligned. */
  {
    uword am, av, ah;

    am = pointer_to_uword (memory);
    av = mheap_page_round (am);
    v = uword_to_pointer (av, void *);
    h = mheap_header (v);
    ah = pointer_to_uword (h);
    while (ah < am)
      ah += mheap_page_size;

    h = uword_to_pointer (ah, void *);
    v = mheap_vector (h);

    if (PREDICT_FALSE (memory + memory_size < v))
      {
        /*
         * This will happen when the requested memory_size is too
         * small to cope with the heap header and/or memory alignment.
         */
        clib_mem_vm_free (memory, memory_size);
        return 0;
      }

    size = memory + memory_size - v;
  }

  /* VM map header so we can use memory. */
  if (!(flags & MHEAP_FLAG_DISABLE_VM))
    clib_mem_vm_map (h, sizeof (h[0]));

  /* Zero vector header: both heap header and vector length. */
  memset (h, 0, sizeof (h[0]));
  _vec_len (v) = 0;

  h->vm_alloc_offset_from_header = (void *) h - memory;
  h->vm_alloc_size = memory_size;

  h->max_size = size;
  h->owner_cpu = ~0;

  /* Set flags based on those given less builtin-flags. */
  h->flags |= (flags & ~MHEAP_FLAG_TRACE);

  /* Unmap remainder of heap until we will be ready to use it. */
  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    mheap_vm (v, MHEAP_VM_UNMAP | MHEAP_VM_ROUND_UP,
              (clib_address_t) v, h->max_size);

  /* Initialize free list heads to empty. */
  memset (h->first_free_elt_uoffset_by_bin, 0xFF,
          sizeof (h->first_free_elt_uoffset_by_bin));

  return v;
}
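
/* Usage sketch (illustrative only; most callers go through the clib_mem_*
   wrappers in vppinfra/mem.h rather than calling this API directly):

     uword offset;
     void *heap = mheap_alloc (0, 1 << 20);
     heap = mheap_get_aligned (heap, 128, 64, 0, &offset);
     if (offset != MHEAP_GROUNDED)
       {
         void *obj = heap + offset;
         mheap_put (heap, offset);
       }

   Here obj is the 64-byte-aligned user data; MHEAP_GROUNDED signals an
   allocation failure. */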

void *
mheap_alloc (void *memory, uword size)
{
  uword flags = 0;

  if (memory != 0)
    flags |= MHEAP_FLAG_DISABLE_VM;

#ifdef CLIB_HAVE_VEC128
  flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
#endif

  return mheap_alloc_with_flags (memory, size, flags);
}

void *
mheap_alloc_with_lock (void *memory, uword size, int locked)
{
  uword flags = 0;
  void *rv;

  if (memory != 0)
    flags |= MHEAP_FLAG_DISABLE_VM;

#ifdef CLIB_HAVE_VEC128
  flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
#endif

  rv = mheap_alloc_with_flags (memory, size, flags);

  if (rv && locked)
    {
      mheap_t *h = mheap_header (rv);
      h->flags |= MHEAP_FLAG_THREAD_SAFE;
    }
  return rv;
}

void *
_mheap_free (void *v)
{
  mheap_t *h = mheap_header (v);

  if (v)
    clib_mem_vm_free ((void *) h - h->vm_alloc_offset_from_header,
                      h->vm_alloc_size);

  return 0;
}

/* Call user's function with each object in heap. */
void
mheap_foreach (void *v,
               uword (*func) (void *arg, void *v, void *elt_data,
                              uword elt_size), void *arg)
{
  mheap_elt_t *e;
  u8 *stack_heap, *clib_mem_mheap_save;
  u8 tmp_heap_memory[16 * 1024];

  mheap_maybe_lock (v);

  if (vec_len (v) == 0)
    goto done;

  clib_mem_mheap_save = 0;
  stack_heap = 0;

  /* Allocate a new temporary heap on the stack.
     This is so that our hash table & user's callback function can
     themselves allocate memory somewhere without getting in the way
     of the heap we are looking at. */
  if (v == clib_mem_get_heap ())
    {
      stack_heap = mheap_alloc (tmp_heap_memory, sizeof (tmp_heap_memory));
      clib_mem_mheap_save = v;
      clib_mem_set_heap (stack_heap);
    }

  for (e = v;
       e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = mheap_next_elt (e))
    {
      void *p = mheap_elt_data (v, e);
      if (e->is_free)
        continue;
      if ((*func) (arg, v, p, mheap_elt_data_bytes (e)))
        break;
    }

  /* Restore main CLIB heap. */
  if (clib_mem_mheap_save)
    clib_mem_set_heap (clib_mem_mheap_save);

done:
  mheap_maybe_unlock (v);
}

/* Bytes in mheap header overhead not including data bytes. */
always_inline uword
mheap_bytes_overhead (void *v)
{
  mheap_t *h = mheap_header (v);
  return v ? sizeof (h[0]) + h->n_elts * sizeof (mheap_elt_t) : 0;
}

/* Total number of bytes including both data and overhead. */
uword
mheap_bytes (void *v)
{
  return mheap_bytes_overhead (v) + vec_bytes (v);
}

static void
mheap_usage_no_lock (void *v, clib_mem_usage_t * usage)
{
  mheap_t *h = mheap_header (v);
  uword used = 0, free = 0, free_vm_unmapped = 0;

  if (vec_len (v) > 0)
    {
      mheap_elt_t *e;

      for (e = v;
           e->n_user_data != MHEAP_N_USER_DATA_INVALID;
           e = mheap_next_elt (e))
        {
          uword size = mheap_elt_data_bytes (e);
          if (e->is_free)
            {
              free += size;
              if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
                free_vm_unmapped +=
                  mheap_vm_elt (v, MHEAP_VM_NOMAP, mheap_elt_uoffset (v, e));
            }
          else
            used += size;
        }
    }

  usage->object_count = mheap_elts (v);
  usage->bytes_total = mheap_bytes (v);
  usage->bytes_overhead = mheap_bytes_overhead (v);
  usage->bytes_max = mheap_max_size (v);
  usage->bytes_used = used;
  usage->bytes_free = free;
  usage->bytes_free_reclaimed = free_vm_unmapped;
}

void
mheap_usage (void *v, clib_mem_usage_t * usage)
{
  mheap_maybe_lock (v);
  mheap_usage_no_lock (v, usage);
  mheap_maybe_unlock (v);
}

static u8 *
format_mheap_byte_count (u8 * s, va_list * va)
{
  uword n_bytes = va_arg (*va, uword);
  if (n_bytes < 1024)
    return format (s, "%wd", n_bytes);
  else
    return format (s, "%wdk", n_bytes / 1024);
}

/* Returns first corrupt heap element. */
static mheap_elt_t *
mheap_first_corrupt (void *v)
{
  mheap_elt_t *e, *n;

  if (vec_len (v) == 0)
    return 0;

  e = v;
  while (1)
    {
      if (e->n_user_data == MHEAP_N_USER_DATA_INVALID)
        break;

      n = mheap_next_elt (e);

      if (e->n_user_data != n->prev_n_user_data)
        return e;

      if (e->is_free != n->prev_is_free)
        return e;

      e = n;
    }

  return 0;
}

static u8 *
format_mheap_stats (u8 * s, va_list * va)
{
  mheap_t *h = va_arg (*va, mheap_t *);
  mheap_stats_t *st = &h->stats;
  u32 indent = format_get_indent (s);

  s =
    format (s,
            "alloc. from small object cache: %Ld hits %Ld attempts (%.2f%%) replacements %d",
            st->n_small_object_cache_hits, st->n_small_object_cache_attempts,
            (st->n_small_object_cache_attempts !=
             0 ? 100. * (f64) st->n_small_object_cache_hits /
             (f64) st->n_small_object_cache_attempts : 0.),
            h->small_object_cache.replacement_index);

  s =
    format (s,
            "\n%Ualloc. from free-list: %Ld attempts, %Ld hits (%.2f%%), %Ld considered (per-attempt %.2f)",
            format_white_space, indent, st->free_list.n_search_attempts,
            st->free_list.n_objects_found,
            (st->free_list.n_search_attempts !=
             0 ? 100. * (f64) st->free_list.n_objects_found /
             (f64) st->free_list.n_search_attempts : 0.),
            st->free_list.n_objects_searched,
            (st->free_list.n_search_attempts !=
             0 ? (f64) st->free_list.n_objects_searched /
             (f64) st->free_list.n_search_attempts : 0.));

  s = format (s, "\n%Ualloc. from vector-expand: %Ld",
              format_white_space, indent, st->n_vector_expands);

  s = format (s, "\n%Uallocs: %Ld %.2f clocks/call",
              format_white_space, indent,
              st->n_gets, (f64) st->n_clocks_get / (f64) st->n_gets);

  s = format (s, "\n%Ufrees: %Ld %.2f clocks/call",
              format_white_space, indent,
              st->n_puts, (f64) st->n_clocks_put / (f64) st->n_puts);

  return s;
}

u8 *
format_mheap (u8 * s, va_list * va)
{
  void *v = va_arg (*va, u8 *);
  int verbose = va_arg (*va, int);

  mheap_t *h;
  uword i, size;
  u32 indent;
  clib_mem_usage_t usage;
  mheap_elt_t *first_corrupt;

  mheap_maybe_lock (v);

  h = mheap_header (v);

  mheap_usage_no_lock (v, &usage);

  indent = format_get_indent (s);

  s =
    format (s,
            "%d objects, %U of %U used, %U free, %U reclaimed, %U overhead",
            usage.object_count, format_mheap_byte_count, usage.bytes_used,
            format_mheap_byte_count, usage.bytes_total,
            format_mheap_byte_count, usage.bytes_free,
            format_mheap_byte_count, usage.bytes_free_reclaimed,
            format_mheap_byte_count, usage.bytes_overhead);

  if (usage.bytes_max != ~0)
    s = format (s, ", %U capacity", format_mheap_byte_count, usage.bytes_max);

  /* Show histogram of sizes. */
  if (verbose > 1)
    {
      uword hist[MHEAP_N_BINS];
      mheap_elt_t *e;
      uword i, n_hist;

      memset (hist, 0, sizeof (hist));

      n_hist = 0;
      for (e = v;
           e->n_user_data != MHEAP_N_USER_DATA_INVALID;
           e = mheap_next_elt (e))
        {
          uword n_user_data_bytes = mheap_elt_data_bytes (e);
          uword bin = user_data_size_to_bin_index (n_user_data_bytes);
          if (!e->is_free)
            {
              hist[bin] += 1;
              n_hist += 1;
            }
        }

      s = format (s, "\n%U%=12s%=12s%=16s",
                  format_white_space, indent + 2,
                  "Size", "Count", "Fraction");

      for (i = 0; i < ARRAY_LEN (hist); i++)
        {
          if (hist[i] == 0)
            continue;
          s = format (s, "\n%U%12d%12wd%16.4f",
                      format_white_space, indent + 2,
                      MHEAP_MIN_USER_DATA_BYTES +
                      i * MHEAP_USER_DATA_WORD_BYTES, hist[i],
                      (f64) hist[i] / (f64) n_hist);
        }
    }

  if (verbose)
    s = format (s, "\n%U%U",
                format_white_space, indent + 2, format_mheap_stats, h);

  if ((h->flags & MHEAP_FLAG_TRACE) && vec_len (h->trace_main.traces) > 0)
    {
      /* Make a copy of traces since we'll be sorting them. */
      mheap_trace_t *t, *traces_copy;
      u32 indent, total_objects_traced;

      traces_copy = vec_dup (h->trace_main.traces);
      qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
             mheap_trace_sort);

      total_objects_traced = 0;
      s = format (s, "\n");
      vec_foreach (t, traces_copy)
      {
        /* Skip over free elements. */
        if (t->n_allocations == 0)
          continue;

        total_objects_traced += t->n_allocations;

        /* When not verbose only report allocations of more than 1k. */
        if (!verbose && t->n_bytes < 1024)
          continue;

        if (t == traces_copy)
          s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
                      "Sample");
        s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations,
                    t->offset + v);
        indent = format_get_indent (s);
        for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
          {
            if (i > 0)
              s = format (s, "%U", format_white_space, indent);
#ifdef CLIB_UNIX
            s =
              format (s, " %U\n", format_clib_elf_symbol_with_address,
                      t->callers[i]);
#else
            s = format (s, " %p\n", t->callers[i]);
#endif
          }
      }

      s = format (s, "%d total traced objects\n", total_objects_traced);

      vec_free (traces_copy);
    }

  first_corrupt = mheap_first_corrupt (v);
  if (first_corrupt)
    {
      size = mheap_elt_data_bytes (first_corrupt);
      s = format (s, "\n first corrupt object: %p, size %wd\n %U",
                  first_corrupt, size, format_hex_bytes, first_corrupt, size);
    }

  /* FIXME.  This output could be wrong in the unlikely case that format
     uses the same mheap as we are currently inspecting. */
  if (verbose > 1)
    {
      mheap_elt_t *e;
      uword i, o;

      s = format (s, "\n");

      e = mheap_elt_at_uoffset (v, 0);
      i = 0;
      while (1)
        {
          if ((i % 8) == 0)
            s = format (s, "%8d: ", i);

          o = mheap_elt_uoffset (v, e);

          if (e->is_free)
            s = format (s, "(%8d) ", o);
          else
            s = format (s, " %8d ", o);

          if ((i % 8) == 7 || (i + 1) >= h->n_elts)
            s = format (s, "\n");
        }
    }

  mheap_maybe_unlock (v);

  return s;
}

void
dmh (void *v)
{
  fformat (stderr, "%U", format_mheap, v, 1);
}

static void
mheap_validate_breakpoint ()
{
  os_panic ();
}

void
mheap_validate (void *v)
{
  mheap_t *h = mheap_header (v);
  uword i, s;

  uword elt_count, elt_size;
  uword free_count_from_free_lists, free_size_from_free_lists;
  uword small_elt_free_count, small_elt_free_size;

#define CHECK(x) if (! (x)) { mheap_validate_breakpoint (); os_panic (); }

  if (vec_len (v) == 0)
    return;

  mheap_maybe_lock (v);

  /* Validate number of elements and size. */
  free_size_from_free_lists = free_count_from_free_lists = 0;
  for (i = 0; i < ARRAY_LEN (h->first_free_elt_uoffset_by_bin); i++)
    {
      mheap_elt_t *e, *n;
      uword is_first;

      CHECK ((h->first_free_elt_uoffset_by_bin[i] != MHEAP_GROUNDED)
             == ((h->non_empty_free_elt_heads[i / BITS (uword)]
                  & ((uword) 1 << (uword) (i % BITS (uword)))) != 0));

      if (h->first_free_elt_uoffset_by_bin[i] == MHEAP_GROUNDED)
        continue;

      e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[i]);
      is_first = 1;
      while (1)
        {
          uword s;

          n = mheap_next_elt (e);

          /* Object must be marked free. */
          CHECK (e->is_free);

          /* Next object's previous free bit must also be set. */
          CHECK (n->prev_is_free);

          if (is_first)
            CHECK (e->free_elt.prev_uoffset == MHEAP_GROUNDED);
          is_first = 0;

          s = mheap_elt_data_bytes (e);
          CHECK (user_data_size_to_bin_index (s) == i);

          free_count_from_free_lists += 1;
          free_size_from_free_lists += s;

          if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
            break;

          n = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);

          /* Check free element linkages. */
          CHECK (n->free_elt.prev_uoffset == mheap_elt_uoffset (v, e));

          e = n;
        }
    }

  /* Go through small object cache. */
  small_elt_free_count = small_elt_free_size = 0;
  for (i = 0; i < ARRAY_LEN (h->small_object_cache.bins.as_u8); i++)
    {
      if (h->small_object_cache.bins.as_u8[i] != 0)
        {
          mheap_elt_t *e;
          uword b = h->small_object_cache.bins.as_u8[i] - 1;
          uword o = h->small_object_cache.offsets[i];
          uword s;

          e = mheap_elt_at_uoffset (v, o);

          /* Object must be allocated. */
          CHECK (!e->is_free);

          s = mheap_elt_data_bytes (e);
          CHECK (user_data_size_to_bin_index (s) == b);

          small_elt_free_count += 1;
          small_elt_free_size += s;
        }
    }

  {
    mheap_elt_t *e, *n;
    uword elt_free_size, elt_free_count;

    elt_count = elt_size = elt_free_size = elt_free_count = 0;
    for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = n)
      {
        if (e->prev_n_user_data != MHEAP_N_USER_DATA_INVALID)
          CHECK (e->prev_n_user_data * sizeof (e->user_data[0]) >=
                 MHEAP_MIN_USER_DATA_BYTES);

        CHECK (e->n_user_data * sizeof (e->user_data[0]) >=
               MHEAP_MIN_USER_DATA_BYTES);

        n = mheap_next_elt (e);

        CHECK (e->is_free == n->prev_is_free);

        elt_count++;
        s = mheap_elt_data_bytes (e);
        elt_size += s;

        if (e->is_free)
          {
            elt_free_count++;
            elt_free_size += s;
          }

        /* Consecutive free objects should have been combined. */
        CHECK (!(e->prev_is_free && n->prev_is_free));
      }

    CHECK (free_count_from_free_lists == elt_free_count);
    CHECK (free_size_from_free_lists == elt_free_size);
    CHECK (elt_count == h->n_elts + elt_free_count + small_elt_free_count);
    CHECK (elt_size + (elt_count + 1) * MHEAP_ELT_OVERHEAD_BYTES ==
           vec_len (v));
  }

  {
    mheap_elt_t *e, *n;

    for (e = v; e->n_user_data == MHEAP_N_USER_DATA_INVALID; e = n)
      {
        n = mheap_next_elt (e);
        CHECK (e->n_user_data == n->prev_n_user_data);
      }
  }

#undef CHECK

  mheap_maybe_unlock (v);

  h->validate_serial += 1;
}

static void
mheap_get_trace (void *v, uword offset, uword size)
{
  mheap_t *h;
  mheap_trace_main_t *tm;
  mheap_trace_t *t;
  uword i, n_callers, trace_index, *p;
  mheap_trace_t trace;

  /* Spurious Coverity warnings be gone. */
  memset (&trace, 0, sizeof (trace));

  n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
                              /* Skip mheap_get_aligned's frame */ 1);
  if (n_callers == 0)
    return;

  for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
    trace.callers[i] = 0;

  h = mheap_header (v);
  tm = &h->trace_main;

  if (!tm->trace_by_callers)
    tm->trace_by_callers =
      hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));

  p = hash_get_mem (tm->trace_by_callers, &trace.callers);
  if (p)
    {
      trace_index = p[0];
      t = tm->traces + trace_index;
    }
  else
    {
      i = vec_len (tm->trace_free_list);
      if (i > 0)
        {
          trace_index = tm->trace_free_list[i - 1];
          _vec_len (tm->trace_free_list) = i - 1;
        }
      else
        {
          mheap_trace_t *old_start = tm->traces;
          mheap_trace_t *old_end = vec_end (tm->traces);

          vec_add2 (tm->traces, t, 1);

          if (tm->traces != old_start)
            {
              hash_pair_t *p;
              mheap_trace_t *q;
              /* *INDENT-OFF* */
              hash_foreach_pair (p, tm->trace_by_callers,
              ({
                q = uword_to_pointer (p->key, mheap_trace_t *);
                ASSERT (q >= old_start && q < old_end);
                p->key = pointer_to_uword (tm->traces + (q - old_start));
              }));
              /* *INDENT-ON* */
            }
          trace_index = t - tm->traces;
        }

      t = tm->traces + trace_index;
      t[0] = trace;
      t->n_allocations = 0;
      t->n_bytes = 0;
      hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
    }

  t->n_allocations += 1;
  t->n_bytes += size;
  t->offset = offset;   /* keep a sample to autopsy */
  hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
}
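
/* Tracing note: the table above is keyed by the full backtrace, so each
   distinct call path accumulates byte and allocation counts in one
   mheap_trace_t, while trace_index_by_offset lets mheap_put_trace() below
   credit a free back to the path that allocated it.  The hash_foreach_pair
   fix-up handles tm->traces being reallocated by vec_add2. */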

static void
mheap_put_trace (void *v, uword offset, uword size)
{
  mheap_t *h;
  mheap_trace_main_t *tm;
  mheap_trace_t *t;
  uword trace_index, *p;

  h = mheap_header (v);
  tm = &h->trace_main;
  p = hash_get (tm->trace_index_by_offset, offset);
  if (!p)
    return;

  trace_index = p[0];
  hash_unset (tm->trace_index_by_offset, offset);
  ASSERT (trace_index < vec_len (tm->traces));

  t = tm->traces + trace_index;
  ASSERT (t->n_allocations > 0);
  ASSERT (t->n_bytes >= size);
  t->n_allocations -= 1;
  t->n_bytes -= size;
  if (t->n_allocations == 0)
    {
      hash_unset_mem (tm->trace_by_callers, t->callers);
      vec_add1 (tm->trace_free_list, trace_index);
      memset (t, 0, sizeof (t[0]));
    }
}

static int
mheap_trace_sort (const void *_t1, const void *_t2)
{
  const mheap_trace_t *t1 = _t1;
  const mheap_trace_t *t2 = _t2;
  word cmp;

  cmp = (word) t2->n_bytes - (word) t1->n_bytes;
  if (!cmp)
    cmp = (word) t2->n_allocations - (word) t1->n_allocations;
  return cmp;
}

always_inline void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
  vec_free (tm->traces);
  vec_free (tm->trace_free_list);
  hash_free (tm->trace_by_callers);
  hash_free (tm->trace_index_by_offset);
}

void
mheap_trace (void *v, int enable)
{
  mheap_t *h;

  h = mheap_header (v);

  if (enable)
    {
      h->flags |= MHEAP_FLAG_TRACE;
    }
  else
    {
      mheap_trace_main_free (&h->trace_main);
      h->flags &= ~MHEAP_FLAG_TRACE;
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */