/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <vppinfra/bitops.h>
#include <vppinfra/hash.h>
#include <vppinfra/format.h>
#include <vppinfra/mheap.h>
#include <vppinfra/os.h>
#include <vppinfra/time.h>

#ifdef CLIB_UNIX
#include <vppinfra/elf_clib.h>
#endif

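/*
 * Added orientation comment (not from the original author): an mheap is a
 * clib vector whose header (mheap_t) sits just before the vector data.
 * Objects are identified by byte offsets into the vector rather than by
 * pointers, and MHEAP_GROUNDED means "no object".  A minimal, assumed call
 * pattern looks like:
 *
 *   uword offset;
 *   void *heap = mheap_alloc (0, 64 << 20);
 *   heap = mheap_get_aligned (heap, n_bytes, align, 0, &offset);
 *   if (offset != MHEAP_GROUNDED)
 *     mheap_put (heap, offset);
 */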
static void mheap_get_trace (void *v, uword offset, uword size);
static void mheap_put_trace (void *v, uword offset, uword size);
static int mheap_trace_sort (const void *t1, const void *t2);

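/*
 * Added note: when MHEAP_FLAG_THREAD_SAFE is set, the heap is guarded by a
 * test-and-set spin lock.  owner_cpu and recursion_count make the lock
 * recursive, so a thread that already holds it may allocate or free again
 * without deadlocking.
 */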
always_inline void
mheap_maybe_lock (void *v)
{
  mheap_t *h = mheap_header (v);
  if (v && (h->flags & MHEAP_FLAG_THREAD_SAFE))
    {
      u32 my_cpu = os_get_thread_index ();
      if (h->owner_cpu == my_cpu)
        {
          h->recursion_count++;
          return;
        }

      while (__sync_lock_test_and_set (&h->lock, 1))
        ;

      h->owner_cpu = my_cpu;
      h->recursion_count = 1;
    }
}

always_inline void
mheap_maybe_unlock (void *v)
{
  mheap_t *h = mheap_header (v);
  if (v && h->flags & MHEAP_FLAG_THREAD_SAFE)
    {
      ASSERT (os_get_thread_index () == h->owner_cpu);
      if (--h->recursion_count == 0)
        {
          h->owner_cpu = ~0;
          CLIB_MEMORY_BARRIER ();
          h->lock = 0;
        }
    }
}

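/*
 * Added note on binning: free elements are grouped into bins.  The first
 * MHEAP_N_SMALL_OBJECT_BINS bins are linear, one per
 * MHEAP_USER_DATA_WORD_BYTES step starting at MHEAP_MIN_USER_DATA_BYTES;
 * larger sizes fall into logarithmically spaced bins indexed via
 * max_log2 (size).  user_data_size_to_bin_index() below implements this
 * mapping.
 */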
/* Find bin for objects with size at least n_user_data_bytes. */
always_inline uword
user_data_size_to_bin_index (uword n_user_data_bytes)
{
  uword n_user_data_words;
  word small_bin, large_bin;

  /* User size must be at least big enough to hold free elt. */
  n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);

  /* Round to words. */
  n_user_data_words =
    (round_pow2 (n_user_data_bytes, MHEAP_USER_DATA_WORD_BYTES) /
     MHEAP_USER_DATA_WORD_BYTES);

  ASSERT (n_user_data_words > 0);
  small_bin =
    n_user_data_words -
    (MHEAP_MIN_USER_DATA_BYTES / MHEAP_USER_DATA_WORD_BYTES);
  ASSERT (small_bin >= 0);

  large_bin =
    MHEAP_N_SMALL_OBJECT_BINS + max_log2 (n_user_data_bytes) -
    MHEAP_LOG2_N_SMALL_OBJECT_BINS;

  return small_bin < MHEAP_N_SMALL_OBJECT_BINS ? small_bin : large_bin;
}

always_inline uword
mheap_elt_size_to_user_n_bytes (uword n_bytes)
{
  ASSERT (n_bytes >= sizeof (mheap_elt_t));
  return (n_bytes - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
}

always_inline uword __attribute__ ((unused))
mheap_elt_size_to_user_n_words (uword n_bytes)
{
  ASSERT (n_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);
  return mheap_elt_size_to_user_n_bytes (n_bytes) /
    MHEAP_USER_DATA_WORD_BYTES;
}

always_inline void
mheap_elt_set_size (void *v,
                    uword uoffset, uword n_user_data_bytes, uword is_free)
{
  mheap_elt_t *e, *n;

  e = mheap_elt_at_uoffset (v, uoffset);

  ASSERT (n_user_data_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);

  e->n_user_data = n_user_data_bytes / MHEAP_USER_DATA_WORD_BYTES;
  e->is_free = is_free;
  ASSERT (e->prev_n_user_data * sizeof (e->user_data[0]) >=
          MHEAP_MIN_USER_DATA_BYTES);

  n = mheap_next_elt (e);
  n->prev_n_user_data = e->n_user_data;
  n->prev_is_free = is_free;
}

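/*
 * Added note on free-list bookkeeping: each bin heads a doubly linked list
 * of free elements, threaded through the elements by offsets and anchored
 * at first_free_elt_uoffset_by_bin[bin].  non_empty_free_elt_heads[] keeps
 * one bit per bin so the allocator can skip empty bins with a
 * find-first-set rather than scanning every list head.
 */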
always_inline void
set_first_free_elt_offset (mheap_t * h, uword bin, uword uoffset)
{
  uword i0, i1;

  h->first_free_elt_uoffset_by_bin[bin] = uoffset;

  i0 = bin / BITS (h->non_empty_free_elt_heads[0]);
  i1 = (uword) 1 << (uword) (bin % BITS (h->non_empty_free_elt_heads[0]));

  ASSERT (i0 < ARRAY_LEN (h->non_empty_free_elt_heads));
  if (h->first_free_elt_uoffset_by_bin[bin] == MHEAP_GROUNDED)
    h->non_empty_free_elt_heads[i0] &= ~i1;
  else
    h->non_empty_free_elt_heads[i0] |= i1;
}

always_inline void
set_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e = mheap_elt_at_uoffset (v, uoffset);
  mheap_elt_t *n = mheap_next_elt (e);
  uword bin = user_data_size_to_bin_index (n_user_data_bytes);

  ASSERT (n->prev_is_free);
  ASSERT (e->is_free);

  e->free_elt.prev_uoffset = MHEAP_GROUNDED;
  e->free_elt.next_uoffset = h->first_free_elt_uoffset_by_bin[bin];

  /* Fill in next free elt's previous pointer. */
  if (e->free_elt.next_uoffset != MHEAP_GROUNDED)
    {
      mheap_elt_t *nf = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
      ASSERT (nf->is_free);
      nf->free_elt.prev_uoffset = uoffset;
    }

  set_first_free_elt_offset (h, bin, uoffset);
}

always_inline void
new_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
  mheap_elt_set_size (v, uoffset, n_user_data_bytes, /* is_free */ 1);
  set_free_elt (v, uoffset, n_user_data_bytes);
}

always_inline void
remove_free_elt (void *v, mheap_elt_t * e, uword bin)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *p, *n;
#if CLIB_VEC64 > 0
  u64 no, po;
#else
  u32 no, po;
#endif

  no = e->free_elt.next_uoffset;

  n = no != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, no) : 0;
  po = e->free_elt.prev_uoffset;
  p = po != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, po) : 0;

  if (!p)
    set_first_free_elt_offset (h, bin, no);
  else
    p->free_elt.next_uoffset = no;

  if (n)
    n->free_elt.prev_uoffset = po;
}

always_inline void
remove_free_elt2 (void *v, mheap_elt_t * e)
{
  uword bin;
  bin = user_data_size_to_bin_index (mheap_elt_data_bytes (e));
  remove_free_elt (v, e, bin);
}

#define MHEAP_VM_MAP		(1 << 0)
#define MHEAP_VM_UNMAP		(1 << 1)
#define MHEAP_VM_NOMAP		(0 << 1)
#define MHEAP_VM_ROUND		(1 << 2)
#define MHEAP_VM_ROUND_UP	MHEAP_VM_ROUND
#define MHEAP_VM_ROUND_DOWN	(0 << 2)

static uword mheap_page_size;

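/*
 * Added note: mheap_page_round() rounds an address up to the next page
 * boundary and mheap_page_truncate() rounds it down.  Assuming 4096-byte
 * pages, for example, mheap_page_round (5) == 4096 and
 * mheap_page_truncate (5000) == 4096.
 */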
static_always_inline uword
mheap_page_round (uword addr)
{
  return (addr + mheap_page_size - 1) & ~(mheap_page_size - 1);
}

static_always_inline uword
mheap_page_truncate (uword addr)
{
  return addr & ~(mheap_page_size - 1);
}

static_always_inline uword
mheap_vm (void *v, uword flags, clib_address_t start_addr, uword size)
{
  mheap_t *h = mheap_header (v);
  clib_address_t start_page, end_page, end_addr;
  uword mapped_bytes;

  ASSERT (!(h->flags & MHEAP_FLAG_DISABLE_VM));

  end_addr = start_addr + size;

  /* Round start/end address up to page boundary. */
  start_page = mheap_page_round (start_addr);

  if ((flags & MHEAP_VM_ROUND) == MHEAP_VM_ROUND_UP)
    end_page = mheap_page_round (end_addr);
  else
    end_page = mheap_page_truncate (end_addr);

  mapped_bytes = 0;
  if (end_page > start_page)
    {
      mapped_bytes = end_page - start_page;
      if (flags & MHEAP_VM_MAP)
        clib_mem_vm_map ((void *) start_page, end_page - start_page);
      else if (flags & MHEAP_VM_UNMAP)
        clib_mem_vm_unmap ((void *) start_page, end_page - start_page);
    }

  return mapped_bytes;
}

static_always_inline uword
mheap_vm_elt (void *v, uword flags, uword offset)
{
  mheap_elt_t *e;
  clib_address_t start_addr, end_addr;

  e = mheap_elt_at_uoffset (v, offset);
  start_addr = (clib_address_t) ((void *) e->user_data);
  end_addr = (clib_address_t) mheap_next_elt (e);
  return mheap_vm (v, flags, start_addr, end_addr - start_addr);
}

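/*
 * Added note on the small object cache: up to BITS (uword) recently freed
 * small objects are parked in h->small_object_cache instead of the free
 * lists.  Each slot stores the object's bin number plus one in a byte of
 * bins.as_u8[] (0 means the slot is empty) and its offset in offsets[].
 * Lookup is a 16-byte SIMD compare of the bin tags; when no slot is free, a
 * cyclic replacement index picks a victim whose offset is handed back to
 * the caller to be freed normally.
 */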
always_inline uword
mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin)
{
  uword mask;

/* $$$$ ELIOT FIXME: add Altivec version of this routine */
#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) || defined (__i386__)
  mask = 0;
#else
  u8x16 b = u8x16_splat (bin);

  ASSERT (bin < 256);

#define _(i) ((uword) u8x16_compare_byte_mask ((b == c->bins.as_u8x16[i])) << (uword) ((i)*16))
  mask = _(0) | _(1);
  if (BITS (uword) > 32)
    mask |= _(2) | _(3);
#undef _

#endif
  return mask;
}

always_inline uword
mheap_get_small_object (mheap_t * h, uword bin)
{
  mheap_small_object_cache_t *c = &h->small_object_cache;
  uword mask = mheap_small_object_cache_mask (c, bin + 1);
  uword offset = MHEAP_GROUNDED;

  if (mask)
    {
      uword i = min_log2 (mask);
      uword o = c->offsets[i];
      ASSERT (o != MHEAP_GROUNDED);
      c->bins.as_u8[i] = 0;
      offset = o;
    }

  return offset;
}

always_inline uword
mheap_put_small_object (mheap_t * h, uword bin, uword offset)
{
  mheap_small_object_cache_t *c = &h->small_object_cache;
  uword free_mask = mheap_small_object_cache_mask (c, 0);
  uword b = bin + 1;
  uword i;

  if (free_mask != 0)
    {
      i = min_log2 (free_mask);
      c->bins.as_u8[i] = b;
      c->offsets[i] = offset;
      return 0;
    }
  else
    /* Nothing free with right size: cyclic replacement. */
    {
      uword old_offset;

      i = c->replacement_index++;
      i %= BITS (uword);
      c->bins.as_u8[i] = b;
      old_offset = c->offsets[i];
      c->offsets[i] = offset;

      /* Return old offset so it can be freed. */
      return old_offset;
    }
}

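/*
 * Added note on the free-bin search below: within a candidate free block
 * [f0, f1) the allocated object [o0, o1) is placed as close to the end of
 * the block as the requested alignment and align_offset allow.  A leading
 * remainder becomes a new free element only if it is at least
 * MHEAP_MIN_USER_DATA_BYTES; a trailing remainder that is too small is
 * simply folded into the allocation.
 */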
static uword
mheap_get_search_free_bin (void *v,
                           uword bin,
                           uword * n_user_data_bytes_arg,
                           uword align, uword align_offset)
{
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e;

  /* Free object is at offset f0 ... f1;
     Allocated object is at offset o0 ... o1. */
  word o0, o1, f0, f1, search_n_user_data_bytes;
  word lo_free_usize, hi_free_usize;

  ASSERT (h->first_free_elt_uoffset_by_bin[bin] != MHEAP_GROUNDED);
  e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[bin]);

  search_n_user_data_bytes = *n_user_data_bytes_arg;

  /* Silence compiler warning. */
  o0 = o1 = f0 = f1 = 0;

  h->stats.free_list.n_search_attempts += 1;

  /* Find an object that is large enough with correct alignment at given alignment offset. */
  while (1)
    {
      uword this_object_n_user_data_bytes = mheap_elt_data_bytes (e);

      ASSERT (e->is_free);
      if (bin < MHEAP_N_SMALL_OBJECT_BINS)
        ASSERT (this_object_n_user_data_bytes >= search_n_user_data_bytes);

      h->stats.free_list.n_objects_searched += 1;

      if (this_object_n_user_data_bytes < search_n_user_data_bytes)
        goto next;

      /* Bounds of free object: from f0 to f1. */
      f0 = ((void *) e->user_data - v);
      f1 = f0 + this_object_n_user_data_bytes;

      /* Place candidate object at end of free block and align as requested. */
      o0 = ((f1 - search_n_user_data_bytes) & ~(align - 1)) - align_offset;
      while (o0 < f0)
        o0 += align;

      /* Make sure that first free fragment is either empty or
         large enough to be valid. */
      while (1)
        {
          lo_free_usize = o0 != f0 ? o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES : 0;
          if (o0 <= f0 || lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES)
            break;
          o0 -= align;
        }

      o1 = o0 + search_n_user_data_bytes;

      /* Does it fit? */
      if (o0 >= f0 && o1 <= f1)
        goto found;

    next:
      /* Reached end of free list without finding large enough object. */
      if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
        return MHEAP_GROUNDED;

      /* Otherwise keep searching for large enough object. */
      e = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
    }

found:
  /* Free fragment at end. */
  hi_free_usize = f1 != o1 ? f1 - o1 - MHEAP_ELT_OVERHEAD_BYTES : 0;

  /* If fragment at end is too small to be a new object,
     give user's object a bit more space than requested. */
  if (hi_free_usize < (word) MHEAP_MIN_USER_DATA_BYTES)
    {
      search_n_user_data_bytes += f1 - o1;
      o1 = f1;
      hi_free_usize = 0;
    }

  /* Need to make sure that relevant memory areas are mapped. */
  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    {
      mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
      mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
      mheap_elt_t *o0_elt = mheap_elt_at_uoffset (v, o0);
      mheap_elt_t *o1_elt = mheap_elt_at_uoffset (v, o1);

      uword f0_page_start, f0_page_end;
      uword o0_page_start, o0_page_end;

      /* Free elt is mapped. Addresses after that may not be mapped. */
      f0_page_start = mheap_page_round (pointer_to_uword (f0_elt->user_data));
      f0_page_end = mheap_page_truncate (pointer_to_uword (f1_elt));

      o0_page_start = mheap_page_truncate (pointer_to_uword (o0_elt));
      o0_page_end = mheap_page_round (pointer_to_uword (o1_elt->user_data));

      if (o0_page_start < f0_page_start)
        o0_page_start = f0_page_start;
      if (o0_page_end > f0_page_end)
        o0_page_end = f0_page_end;

      if (o0_page_end > o0_page_start)
        clib_mem_vm_map (uword_to_pointer (o0_page_start, void *),
                         o0_page_end - o0_page_start);
    }

  /* Remove free object from free list. */
  remove_free_elt (v, e, bin);

  /* Free fragment at beginning. */
  if (lo_free_usize > 0)
    {
      ASSERT (lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES);
      mheap_elt_set_size (v, f0, lo_free_usize, /* is_free */ 1);
      new_free_elt (v, f0, lo_free_usize);
    }

  mheap_elt_set_size (v, o0, search_n_user_data_bytes, /* is_free */ 0);

  if (hi_free_usize > 0)
    {
      uword uo = o1 + MHEAP_ELT_OVERHEAD_BYTES;
      mheap_elt_set_size (v, uo, hi_free_usize, /* is_free */ 1);
      new_free_elt (v, uo, hi_free_usize);
    }

  /* Return actual size of block. */
  *n_user_data_bytes_arg = search_n_user_data_bytes;

  h->stats.free_list.n_objects_found += 1;

  return o0;
}

/* Search free lists for object with given size and alignment. */
static uword
mheap_get_search_free_list (void *v,
                            uword * n_user_bytes_arg,
                            uword align, uword align_offset)
{
  mheap_t *h = mheap_header (v);
  uword bin, n_user_bytes, i, bi;

  n_user_bytes = *n_user_bytes_arg;
  bin = user_data_size_to_bin_index (n_user_bytes);

  if (MHEAP_HAVE_SMALL_OBJECT_CACHE
      && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE)
      && bin < 255
      && align == STRUCT_SIZE_OF (mheap_elt_t, user_data[0])
      && align_offset == 0)
    {
      uword r = mheap_get_small_object (h, bin);
      h->stats.n_small_object_cache_attempts += 1;
      if (r != MHEAP_GROUNDED)
        {
          h->stats.n_small_object_cache_hits += 1;
          return r;
        }
    }

  for (i = bin / BITS (uword); i < ARRAY_LEN (h->non_empty_free_elt_heads);
       i++)
    {
      uword non_empty_bin_mask = h->non_empty_free_elt_heads[i];

      /* No need to search smaller bins. */
      if (i == bin / BITS (uword))
        non_empty_bin_mask &= ~pow2_mask (bin % BITS (uword));

      /* Search each occupied free bin which is large enough. */
      /* *INDENT-OFF* */
      foreach_set_bit (bi, non_empty_bin_mask,
                       ({
                         uword r =
                           mheap_get_search_free_bin (v, bi + i * BITS (uword),
                                                      n_user_bytes_arg,
                                                      align,
                                                      align_offset);
                         if (r != MHEAP_GROUNDED) return r;
                       }));
      /* *INDENT-ON* */
    }

  return MHEAP_GROUNDED;
}

static never_inline void *
mheap_get_extend_vector (void *v,
                         uword n_user_data_bytes,
                         uword align,
                         uword align_offset, uword * offset_return)
{
  /* Bounds of free and allocated objects (as above). */
  uword f0, f1, o0, o1;
  word free_size;
  mheap_t *h = mheap_header (v);
  mheap_elt_t *e;

  if (_vec_len (v) == 0)
    {
      _vec_len (v) = MHEAP_ELT_OVERHEAD_BYTES;

      /* Create first element of heap. */
      e = mheap_elt_at_uoffset (v, _vec_len (v));
      e->prev_n_user_data = MHEAP_N_USER_DATA_INVALID;
    }

  f0 = _vec_len (v);

  o0 = round_pow2 (f0, align) - align_offset;
  while (1)
    {
      free_size = o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES;
      if (o0 == f0 || free_size >= (word) sizeof (mheap_elt_t))
        break;

      o0 += align;
    }

  o1 = o0 + n_user_data_bytes;
  f1 = o1 + MHEAP_ELT_OVERHEAD_BYTES;

  ASSERT (v != 0);
  h = mheap_header (v);

  /* Make sure we have space for object plus overhead. */
  if (f1 > h->max_size)
    {
      *offset_return = MHEAP_GROUNDED;
      return v;
    }

  _vec_len (v) = f1;

  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    {
      mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
      mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);

      uword f0_page = mheap_page_round (pointer_to_uword (f0_elt->user_data));
      uword f1_page = mheap_page_round (pointer_to_uword (f1_elt->user_data));

      if (f1_page > f0_page)
        mheap_vm (v, MHEAP_VM_MAP, f0_page, f1_page - f0_page);
    }

  if (free_size > 0)
    new_free_elt (v, f0, free_size);

  mheap_elt_set_size (v, o0, n_user_data_bytes, /* is_free */ 0);

  /* Mark last element. */
  e = mheap_elt_at_uoffset (v, f1);
  e->n_user_data = MHEAP_N_USER_DATA_INVALID;

  *offset_return = o0;

  return v;
}

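/*
 * Added note: mheap_get_aligned() is the allocation entry point.  The
 * alignment is rounded up to a power of two of at least one user-data word,
 * align_offset is reduced modulo the alignment, the free lists are searched
 * first, and only if that fails is the heap vector extended.  On failure
 * *offset_return is set to MHEAP_GROUNDED; the (possibly newly created)
 * heap vector is returned in either case.
 */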
void *
mheap_get_aligned (void *v,
                   uword n_user_data_bytes,
                   uword align, uword align_offset, uword * offset_return)
{
  mheap_t *h;
  uword offset;
  u64 cpu_times[2];

  cpu_times[0] = clib_cpu_time_now ();

  align = clib_max (align, STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
  align = max_pow2 (align);

  /* Correct align offset to be smaller than alignment. */
  align_offset &= (align - 1);

  /* Align offset must be multiple of minimum object size. */
  if (align_offset % STRUCT_SIZE_OF (mheap_elt_t, user_data[0]) != 0)
    {
      *offset_return = MHEAP_GROUNDED;
      return v;
    }

  /* Round requested size. */
  n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
  n_user_data_bytes =
    round_pow2 (n_user_data_bytes,
                STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));

  if (!v)
    v = mheap_alloc (0, 64 << 20);

  mheap_maybe_lock (v);

  h = mheap_header (v);

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  /* First search free lists for object. */
  offset =
    mheap_get_search_free_list (v, &n_user_data_bytes, align, align_offset);

  h = mheap_header (v);

  /* If that fails allocate object at end of heap by extending vector. */
  if (offset == MHEAP_GROUNDED && _vec_len (v) < h->max_size)
    {
      v =
        mheap_get_extend_vector (v, n_user_data_bytes, align, align_offset,
                                 &offset);
      h = mheap_header (v);
      h->stats.n_vector_expands += offset != MHEAP_GROUNDED;
    }

  *offset_return = offset;
  if (offset != MHEAP_GROUNDED)
    {
      h->n_elts += 1;

      if (h->flags & MHEAP_FLAG_TRACE)
        {
          /* Recursion block for case when we are tracing main clib heap. */
          h->flags &= ~MHEAP_FLAG_TRACE;

          mheap_get_trace (v, offset, n_user_data_bytes);

          h->flags |= MHEAP_FLAG_TRACE;
        }
    }

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  mheap_maybe_unlock (v);

  cpu_times[1] = clib_cpu_time_now ();
  h->stats.n_clocks_get += cpu_times[1] - cpu_times[0];
  h->stats.n_gets += 1;

  return v;
}

static void
free_last_elt (void *v, mheap_elt_t * e)
{
  mheap_t *h = mheap_header (v);

  /* Possibly delete preceding free element also. */
  if (e->prev_is_free)
    {
      e = mheap_prev_elt (e);
      remove_free_elt2 (v, e);
    }

  if (e->prev_n_user_data == MHEAP_N_USER_DATA_INVALID)
    {
      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, mheap_elt_uoffset (v, e));
      _vec_len (v) = 0;
    }
  else
    {
      uword uo = mheap_elt_uoffset (v, e);
      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, uo);
      e->n_user_data = MHEAP_N_USER_DATA_INVALID;
      _vec_len (v) = uo;
    }
}

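/*
 * Added note: mheap_put() frees by offset.  A small object may be parked in
 * the small object cache, in which case an older cached offset is freed in
 * its place; otherwise the element is merged with any free neighbour on
 * either side before going back on a free list, so two adjacent free
 * elements never coexist.
 */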
void
mheap_put (void *v, uword uoffset)
{
  mheap_t *h;
  uword n_user_data_bytes, bin;
  mheap_elt_t *e, *n;
  uword trace_uoffset, trace_n_user_data_bytes;
  u64 cpu_times[2];

  cpu_times[0] = clib_cpu_time_now ();

  h = mheap_header (v);

  mheap_maybe_lock (v);

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  ASSERT (h->n_elts > 0);
  h->n_elts--;
  h->stats.n_puts += 1;

  e = mheap_elt_at_uoffset (v, uoffset);
  n = mheap_next_elt (e);
  n_user_data_bytes = mheap_elt_data_bytes (e);

  trace_uoffset = uoffset;
  trace_n_user_data_bytes = n_user_data_bytes;

  bin = user_data_size_to_bin_index (n_user_data_bytes);
  if (MHEAP_HAVE_SMALL_OBJECT_CACHE
      && bin < 255 && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE))
    {
      uoffset = mheap_put_small_object (h, bin, uoffset);
      if (uoffset == 0)
        goto done;

      e = mheap_elt_at_uoffset (v, uoffset);
      n = mheap_next_elt (e);
      n_user_data_bytes = mheap_elt_data_bytes (e);
    }

  /* Assert that forward and back pointers are equal. */
  if (e->n_user_data != n->prev_n_user_data)
    os_panic ();

  /* Forward and backwards is_free must agree. */
  if (e->is_free != n->prev_is_free)
    os_panic ();

  /* Object was already freed. */
  if (e->is_free)
    os_panic ();

  /* Special case: delete last element in heap. */
  if (n->n_user_data == MHEAP_N_USER_DATA_INVALID)
    free_last_elt (v, e);

  else
    {
      uword f0, f1, n_combine;

      f0 = uoffset;
      f1 = f0 + n_user_data_bytes;
      n_combine = 0;

      if (e->prev_is_free)
        {
          mheap_elt_t *p = mheap_prev_elt (e);
          f0 = mheap_elt_uoffset (v, p);
          remove_free_elt2 (v, p);
          n_combine++;
        }

      if (n->is_free)
        {
          mheap_elt_t *m = mheap_next_elt (n);
          f1 = (void *) m - v;
          remove_free_elt2 (v, n);
          n_combine++;
        }

      if (n_combine)
        mheap_elt_set_size (v, f0, f1 - f0, /* is_free */ 1);
      else
        e->is_free = n->prev_is_free = 1;
      set_free_elt (v, f0, f1 - f0);

      if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
        mheap_vm_elt (v, MHEAP_VM_UNMAP, f0);
    }

done:
  h = mheap_header (v);

  if (h->flags & MHEAP_FLAG_TRACE)
    {
      /* Recursion block for case when we are tracing main clib heap. */
      h->flags &= ~MHEAP_FLAG_TRACE;

      mheap_put_trace (v, trace_uoffset, trace_n_user_data_bytes);

      h->flags |= MHEAP_FLAG_TRACE;
    }

  if (h->flags & MHEAP_FLAG_VALIDATE)
    mheap_validate (v);

  mheap_maybe_unlock (v);

  cpu_times[1] = clib_cpu_time_now ();
  h->stats.n_clocks_put += cpu_times[1] - cpu_times[0];
}

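/*
 * Added note: mheap_alloc_with_flags() either takes caller-supplied memory
 * or VM-allocates a region itself (clearing MHEAP_FLAG_DISABLE_VM in that
 * case).  The heap header is placed on the first page boundary inside the
 * region, and the remainder of the region is left unmapped until it is
 * actually needed.  It is normally reached through mheap_alloc() below.
 */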
void *
mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
{
  mheap_t *h;
  void *v;
  uword size;

  if (!mheap_page_size)
    mheap_page_size = clib_mem_get_page_size ();

  if (!memory)
    {
      /* No memory given, try to VM allocate some. */
      memory = clib_mem_vm_alloc (memory_size);
      if (!memory)
        return 0;

      /* No memory region implies we have virtual memory. */
      flags &= ~MHEAP_FLAG_DISABLE_VM;
    }

  /* Make sure that given memory is page aligned. */
  {
    uword am, av, ah;

    am = pointer_to_uword (memory);
    av = mheap_page_round (am);
    v = uword_to_pointer (av, void *);
    h = mheap_header (v);
    ah = pointer_to_uword (h);
    while (ah < am)
      ah += mheap_page_size;

    h = uword_to_pointer (ah, void *);
    v = mheap_vector (h);

    if (PREDICT_FALSE (memory + memory_size < v))
      {
        /*
         * This will happen when the requested memory_size is too
         * small to cope with the heap header and/or memory alignment.
         */
        clib_mem_vm_free (memory, memory_size);
        return 0;
      }

    size = memory + memory_size - v;
  }

  /* VM map header so we can use memory. */
  if (!(flags & MHEAP_FLAG_DISABLE_VM))
    clib_mem_vm_map (h, sizeof (h[0]));

  /* Zero vector header: both heap header and vector length. */
  memset (h, 0, sizeof (h[0]));
  _vec_len (v) = 0;

  h->vm_alloc_offset_from_header = (void *) h - memory;
  h->vm_alloc_size = memory_size;

  h->max_size = size;
  h->owner_cpu = ~0;

  /* Set flags based on those given less builtin-flags. */
  h->flags |= (flags & ~MHEAP_FLAG_TRACE);

  /* Unmap remainder of heap until we will be ready to use it. */
  if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
    mheap_vm (v, MHEAP_VM_UNMAP | MHEAP_VM_ROUND_UP,
              (clib_address_t) v, h->max_size);

  /* Initialize free list heads to empty. */
  memset (h->first_free_elt_uoffset_by_bin, 0xFF,
          sizeof (h->first_free_elt_uoffset_by_bin));

  return v;
}

void *
mheap_alloc (void *memory, uword size)
{
  uword flags = 0;

  if (memory != 0)
    flags |= MHEAP_FLAG_DISABLE_VM;

#ifdef CLIB_HAVE_VEC128
  flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
#endif

  return mheap_alloc_with_flags (memory, size, flags);
}

void *
_mheap_free (void *v)
{
  mheap_t *h = mheap_header (v);

  if (v)
    clib_mem_vm_free ((void *) h - h->vm_alloc_offset_from_header,
                      h->vm_alloc_size);

  return 0;
}

/* Call user's function with each object in heap. */
void
mheap_foreach (void *v,
               uword (*func) (void *arg, void *v, void *elt_data,
                              uword elt_size), void *arg)
{
  mheap_elt_t *e;
  u8 *stack_heap, *clib_mem_mheap_save;
  u8 tmp_heap_memory[16 * 1024];

  mheap_maybe_lock (v);

  if (vec_len (v) == 0)
    goto done;

  clib_mem_mheap_save = 0;
  stack_heap = 0;

  /* Allocate a new temporary heap on the stack.
     This is so that our hash table & user's callback function can
     themselves allocate memory somewhere without getting in the way
     of the heap we are looking at. */
  if (v == clib_mem_get_heap ())
    {
      stack_heap = mheap_alloc (tmp_heap_memory, sizeof (tmp_heap_memory));
      clib_mem_mheap_save = v;
      clib_mem_set_heap (stack_heap);
    }

  for (e = v;
       e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = mheap_next_elt (e))
    {
      void *p = mheap_elt_data (v, e);
      if (e->is_free)
        continue;
      if ((*func) (arg, v, p, mheap_elt_data_bytes (e)))
        break;
    }

  /* Restore main CLIB heap. */
  if (clib_mem_mheap_save)
    clib_mem_set_heap (clib_mem_mheap_save);

done:
  mheap_maybe_unlock (v);
}

/* Bytes in mheap header overhead not including data bytes. */
always_inline uword
mheap_bytes_overhead (void *v)
{
  mheap_t *h = mheap_header (v);
  return v ? sizeof (h[0]) + h->n_elts * sizeof (mheap_elt_t) : 0;
}

/* Total number of bytes including both data and overhead. */
uword
mheap_bytes (void *v)
{
  return mheap_bytes_overhead (v) + vec_bytes (v);
}

static void
mheap_usage_no_lock (void *v, clib_mem_usage_t * usage)
{
  mheap_t *h = mheap_header (v);
  uword used = 0, free = 0, free_vm_unmapped = 0;

  if (vec_len (v) > 0)
    {
      mheap_elt_t *e;

      for (e = v;
           e->n_user_data != MHEAP_N_USER_DATA_INVALID;
           e = mheap_next_elt (e))
        {
          uword size = mheap_elt_data_bytes (e);
          if (e->is_free)
            {
              free += size;
              if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
                free_vm_unmapped +=
                  mheap_vm_elt (v, MHEAP_VM_NOMAP, mheap_elt_uoffset (v, e));
            }
          else
            used += size;
        }
    }

  usage->object_count = mheap_elts (v);
  usage->bytes_total = mheap_bytes (v);
  usage->bytes_overhead = mheap_bytes_overhead (v);
  usage->bytes_max = mheap_max_size (v);
  usage->bytes_used = used;
  usage->bytes_free = free;
  usage->bytes_free_reclaimed = free_vm_unmapped;
}

void
mheap_usage (void *v, clib_mem_usage_t * usage)
{
  mheap_maybe_lock (v);
  mheap_usage_no_lock (v, usage);
  mheap_maybe_unlock (v);
}

static u8 *
format_mheap_byte_count (u8 * s, va_list * va)
{
  uword n_bytes = va_arg (*va, uword);
  if (n_bytes < 1024)
    return format (s, "%wd", n_bytes);
  else
    return format (s, "%wdk", n_bytes / 1024);
}

/* Returns first corrupt heap element. */
static mheap_elt_t *
mheap_first_corrupt (void *v)
{
  mheap_elt_t *e, *n;

  if (vec_len (v) == 0)
    return 0;

  e = v;
  while (1)
    {
      if (e->n_user_data == MHEAP_N_USER_DATA_INVALID)
        break;

      n = mheap_next_elt (e);

      if (e->n_user_data != n->prev_n_user_data)
        return e;

      if (e->is_free != n->prev_is_free)
        return e;

      e = n;
    }

  return 0;
}

static u8 *
format_mheap_stats (u8 * s, va_list * va)
{
  mheap_t *h = va_arg (*va, mheap_t *);
  mheap_stats_t *st = &h->stats;
  u32 indent = format_get_indent (s);

  s =
    format (s,
            "alloc. from small object cache: %Ld hits %Ld attempts (%.2f%%) replacements %d",
            st->n_small_object_cache_hits, st->n_small_object_cache_attempts,
            (st->n_small_object_cache_attempts !=
             0 ? 100. * (f64) st->n_small_object_cache_hits /
             (f64) st->n_small_object_cache_attempts : 0.),
            h->small_object_cache.replacement_index);

  s =
    format (s,
            "\n%Ualloc. from free-list: %Ld attempts, %Ld hits (%.2f%%), %Ld considered (per-attempt %.2f)",
            format_white_space, indent, st->free_list.n_search_attempts,
            st->free_list.n_objects_found,
            (st->free_list.n_search_attempts !=
             0 ? 100. * (f64) st->free_list.n_objects_found /
             (f64) st->free_list.n_search_attempts : 0.),
            st->free_list.n_objects_searched,
            (st->free_list.n_search_attempts !=
             0 ? (f64) st->free_list.n_objects_searched /
             (f64) st->free_list.n_search_attempts : 0.));

  s = format (s, "\n%Ualloc. from vector-expand: %Ld",
              format_white_space, indent, st->n_vector_expands);

  s = format (s, "\n%Uallocs: %Ld %.2f clocks/call",
              format_white_space, indent,
              st->n_gets, (f64) st->n_clocks_get / (f64) st->n_gets);

  s = format (s, "\n%Ufrees: %Ld %.2f clocks/call",
              format_white_space, indent,
              st->n_puts, (f64) st->n_clocks_put / (f64) st->n_puts);

  return s;
}

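/*
 * Added note: format_mheap() prints a one-line usage summary; verbose > 0
 * appends the allocator statistics above, verbose > 1 also prints a size
 * histogram and a dump of element offsets, and when tracing is enabled the
 * per-call-site allocation table is printed as well.
 */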
u8 *
format_mheap (u8 * s, va_list * va)
{
  void *v = va_arg (*va, u8 *);
  int verbose = va_arg (*va, int);

  mheap_t *h;
  uword i, size;
  u32 indent;
  clib_mem_usage_t usage;
  mheap_elt_t *first_corrupt;

  mheap_maybe_lock (v);

  h = mheap_header (v);

  mheap_usage_no_lock (v, &usage);

  indent = format_get_indent (s);

  s =
    format (s,
            "%d objects, %U of %U used, %U free, %U reclaimed, %U overhead",
            usage.object_count, format_mheap_byte_count, usage.bytes_used,
            format_mheap_byte_count, usage.bytes_total,
            format_mheap_byte_count, usage.bytes_free,
            format_mheap_byte_count, usage.bytes_free_reclaimed,
            format_mheap_byte_count, usage.bytes_overhead);

  if (usage.bytes_max != ~0)
    s = format (s, ", %U capacity", format_mheap_byte_count, usage.bytes_max);

  /* Show histogram of sizes. */
  if (verbose > 1)
    {
      uword hist[MHEAP_N_BINS];
      mheap_elt_t *e;
      uword i, n_hist;

      memset (hist, 0, sizeof (hist));

      n_hist = 0;
      for (e = v;
           e->n_user_data != MHEAP_N_USER_DATA_INVALID;
           e = mheap_next_elt (e))
        {
          uword n_user_data_bytes = mheap_elt_data_bytes (e);
          uword bin = user_data_size_to_bin_index (n_user_data_bytes);
          if (!e->is_free)
            {
              hist[bin] += 1;
              n_hist += 1;
            }
        }

      s = format (s, "\n%U%=12s%=12s%=16s",
                  format_white_space, indent + 2,
                  "Size", "Count", "Fraction");

      for (i = 0; i < ARRAY_LEN (hist); i++)
        {
          if (hist[i] == 0)
            continue;
          s = format (s, "\n%U%12d%12wd%16.4f",
                      format_white_space, indent + 2,
                      MHEAP_MIN_USER_DATA_BYTES +
                      i * MHEAP_USER_DATA_WORD_BYTES, hist[i],
                      (f64) hist[i] / (f64) n_hist);
        }
    }

  if (verbose)
    s = format (s, "\n%U%U",
                format_white_space, indent + 2, format_mheap_stats, h);

  if ((h->flags & MHEAP_FLAG_TRACE) && vec_len (h->trace_main.traces) > 0)
    {
      /* Make a copy of traces since we'll be sorting them. */
      mheap_trace_t *t, *traces_copy;
      u32 indent, total_objects_traced;

      traces_copy = vec_dup (h->trace_main.traces);
      qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
             mheap_trace_sort);

      total_objects_traced = 0;
      s = format (s, "\n");
      vec_foreach (t, traces_copy)
      {
        /* Skip over free elements. */
        if (t->n_allocations == 0)
          continue;

        total_objects_traced += t->n_allocations;

        /* When not verbose only report allocations of more than 1k. */
        if (!verbose && t->n_bytes < 1024)
          continue;

        if (t == traces_copy)
          s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
                      "Sample");
        s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations,
                    t->offset + v);
        indent = format_get_indent (s);
        for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
          {
            if (i > 0)
              s = format (s, "%U", format_white_space, indent);
#ifdef CLIB_UNIX
            s =
              format (s, " %U\n", format_clib_elf_symbol_with_address,
                      t->callers[i]);
#else
            s = format (s, " %p\n", t->callers[i]);
#endif
          }
      }

      s = format (s, "%d total traced objects\n", total_objects_traced);

      vec_free (traces_copy);
    }

  first_corrupt = mheap_first_corrupt (v);
  if (first_corrupt)
    {
      size = mheap_elt_data_bytes (first_corrupt);
      s = format (s, "\n first corrupt object: %p, size %wd\n %U",
                  first_corrupt, size, format_hex_bytes, first_corrupt, size);
    }

  /* FIXME. This output could be wrong in the unlikely case that format
     uses the same mheap as we are currently inspecting. */
  if (verbose > 1)
    {
      mheap_elt_t *e;
      uword i, o;

      s = format (s, "\n");

      e = mheap_elt_at_uoffset (v, 0);
      i = 0;
      while (1)
        {
          if ((i % 8) == 0)
            s = format (s, "%8d: ", i);

          o = mheap_elt_uoffset (v, e);

          if (e->is_free)
            s = format (s, "(%8d) ", o);
          else
            s = format (s, " %8d ", o);

          if ((i % 8) == 7 || (i + 1) >= h->n_elts)
            s = format (s, "\n");

          /* Assumed fix: the original loop neither advanced to the next
             element nor terminated; step and stop at the sentinel here. */
          e = mheap_next_elt (e);
          i++;
          if (e->n_user_data == MHEAP_N_USER_DATA_INVALID)
            break;
        }
    }

  mheap_maybe_unlock (v);

  return s;
}

void
dmh (void *v)
{
  fformat (stderr, "%U", format_mheap, v, 1);
}

static void
mheap_validate_breakpoint ()
{
  os_panic ();
}

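/*
 * Added note: mheap_validate() walks both the free lists and the element
 * chain and cross-checks them: the bin bitmap must agree with the list
 * heads, free counts and sizes from the lists must match those found by
 * walking the elements, forward/backward sizes and free bits must be
 * consistent, and adjacent free elements must have been coalesced.
 * CHECK() panics via mheap_validate_breakpoint() on the first violation.
 */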
void
mheap_validate (void *v)
{
  mheap_t *h = mheap_header (v);
  uword i, s;

  uword elt_count, elt_size;
  uword free_count_from_free_lists, free_size_from_free_lists;
  uword small_elt_free_count, small_elt_free_size;

#define CHECK(x) if (! (x)) { mheap_validate_breakpoint (); os_panic (); }

  if (vec_len (v) == 0)
    return;

  mheap_maybe_lock (v);

  /* Validate number of elements and size. */
  free_size_from_free_lists = free_count_from_free_lists = 0;
  for (i = 0; i < ARRAY_LEN (h->first_free_elt_uoffset_by_bin); i++)
    {
      mheap_elt_t *e, *n;
      uword is_first;

      CHECK ((h->first_free_elt_uoffset_by_bin[i] != MHEAP_GROUNDED)
             == ((h->non_empty_free_elt_heads[i / BITS (uword)]
                  & ((uword) 1 << (uword) (i % BITS (uword)))) != 0));

      if (h->first_free_elt_uoffset_by_bin[i] == MHEAP_GROUNDED)
        continue;

      e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[i]);
      is_first = 1;
      while (1)
        {
          uword s;

          n = mheap_next_elt (e);

          /* Object must be marked free. */
          CHECK (e->is_free);

          /* Next object's previous free bit must also be set. */
          CHECK (n->prev_is_free);

          if (is_first)
            CHECK (e->free_elt.prev_uoffset == MHEAP_GROUNDED);
          is_first = 0;

          s = mheap_elt_data_bytes (e);
          CHECK (user_data_size_to_bin_index (s) == i);

          free_count_from_free_lists += 1;
          free_size_from_free_lists += s;

          if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
            break;

          n = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);

          /* Check free element linkages. */
          CHECK (n->free_elt.prev_uoffset == mheap_elt_uoffset (v, e));

          e = n;
        }
    }

  /* Go through small object cache. */
  small_elt_free_count = small_elt_free_size = 0;
  for (i = 0; i < ARRAY_LEN (h->small_object_cache.bins.as_u8); i++)
    {
      if (h->small_object_cache.bins.as_u8[i] != 0)
        {
          mheap_elt_t *e;
          uword b = h->small_object_cache.bins.as_u8[i] - 1;
          uword o = h->small_object_cache.offsets[i];
          uword s;

          e = mheap_elt_at_uoffset (v, o);

          /* Object must be allocated. */
          CHECK (!e->is_free);

          s = mheap_elt_data_bytes (e);
          CHECK (user_data_size_to_bin_index (s) == b);

          small_elt_free_count += 1;
          small_elt_free_size += s;
        }
    }

  {
    mheap_elt_t *e, *n;
    uword elt_free_size, elt_free_count;

    elt_count = elt_size = elt_free_size = elt_free_count = 0;
    for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = n)
      {
        if (e->prev_n_user_data != MHEAP_N_USER_DATA_INVALID)
          CHECK (e->prev_n_user_data * sizeof (e->user_data[0]) >=
                 MHEAP_MIN_USER_DATA_BYTES);

        CHECK (e->n_user_data * sizeof (e->user_data[0]) >=
               MHEAP_MIN_USER_DATA_BYTES);

        n = mheap_next_elt (e);

        CHECK (e->is_free == n->prev_is_free);

        elt_count++;
        s = mheap_elt_data_bytes (e);
        elt_size += s;

        if (e->is_free)
          {
            elt_free_count++;
            elt_free_size += s;
          }

        /* Consecutive free objects should have been combined. */
        CHECK (!(e->prev_is_free && n->prev_is_free));
      }

    CHECK (free_count_from_free_lists == elt_free_count);
    CHECK (free_size_from_free_lists == elt_free_size);
    CHECK (elt_count == h->n_elts + elt_free_count + small_elt_free_count);
    CHECK (elt_size + (elt_count + 1) * MHEAP_ELT_OVERHEAD_BYTES ==
           vec_len (v));
  }

  {
    mheap_elt_t *e, *n;

    for (e = v; e->n_user_data == MHEAP_N_USER_DATA_INVALID; e = n)
      {
        n = mheap_next_elt (e);
        CHECK (e->n_user_data == n->prev_n_user_data);
      }
  }

#undef CHECK

  mheap_maybe_unlock (v);

  h->validate_serial += 1;
}

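/*
 * Added note on tracing: each traced allocation records a stack backtrace.
 * Traces are de-duplicated by caller addresses via trace_by_callers,
 * per-trace byte and allocation counters are maintained, and
 * trace_index_by_offset maps a live object back to its trace so
 * mheap_put_trace() can credit the matching free.
 */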
static void
mheap_get_trace (void *v, uword offset, uword size)
{
  mheap_t *h;
  mheap_trace_main_t *tm;
  mheap_trace_t *t;
  uword i, n_callers, trace_index, *p;
  mheap_trace_t trace;

  /* Spurious Coverity warnings be gone. */
  memset (&trace, 0, sizeof (trace));

  n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
                              /* Skip mheap_get_aligned's frame */ 1);
  if (n_callers == 0)
    return;

  for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
    trace.callers[i] = 0;

  h = mheap_header (v);
  tm = &h->trace_main;

  if (!tm->trace_by_callers)
    tm->trace_by_callers =
      hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));

  p = hash_get_mem (tm->trace_by_callers, &trace.callers);
  if (p)
    {
      trace_index = p[0];
      t = tm->traces + trace_index;
    }
  else
    {
      i = vec_len (tm->trace_free_list);
      if (i > 0)
        {
          trace_index = tm->trace_free_list[i - 1];
          _vec_len (tm->trace_free_list) = i - 1;
        }
      else
        {
          mheap_trace_t *old_start = tm->traces;
          mheap_trace_t *old_end = vec_end (tm->traces);

          vec_add2 (tm->traces, t, 1);

          if (tm->traces != old_start)
            {
              hash_pair_t *p;
              mheap_trace_t *q;
              /* *INDENT-OFF* */
              hash_foreach_pair (p, tm->trace_by_callers,
              ({
                q = uword_to_pointer (p->key, mheap_trace_t *);
                ASSERT (q >= old_start && q < old_end);
                p->key = pointer_to_uword (tm->traces + (q - old_start));
              }));
              /* *INDENT-ON* */
            }
          trace_index = t - tm->traces;
        }

      t = tm->traces + trace_index;
      t[0] = trace;
      t->n_allocations = 0;
      t->n_bytes = 0;
      hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
    }

  t->n_allocations += 1;
  t->n_bytes += size;
  t->offset = offset;		/* keep a sample to autopsy */
  hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
}

static void
mheap_put_trace (void *v, uword offset, uword size)
{
  mheap_t *h;
  mheap_trace_main_t *tm;
  mheap_trace_t *t;
  uword trace_index, *p;

  h = mheap_header (v);
  tm = &h->trace_main;
  p = hash_get (tm->trace_index_by_offset, offset);
  if (!p)
    return;

  trace_index = p[0];
  hash_unset (tm->trace_index_by_offset, offset);
  ASSERT (trace_index < vec_len (tm->traces));

  t = tm->traces + trace_index;
  ASSERT (t->n_allocations > 0);
  ASSERT (t->n_bytes >= size);
  t->n_allocations -= 1;
  t->n_bytes -= size;
  if (t->n_allocations == 0)
    {
      hash_unset_mem (tm->trace_by_callers, t->callers);
      vec_add1 (tm->trace_free_list, trace_index);
      memset (t, 0, sizeof (t[0]));
    }
}

static int
mheap_trace_sort (const void *_t1, const void *_t2)
{
  const mheap_trace_t *t1 = _t1;
  const mheap_trace_t *t2 = _t2;
  word cmp;

  cmp = (word) t2->n_bytes - (word) t1->n_bytes;
  if (!cmp)
    cmp = (word) t2->n_allocations - (word) t1->n_allocations;
  return cmp;
}

always_inline void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
  vec_free (tm->traces);
  vec_free (tm->trace_free_list);
  hash_free (tm->trace_by_callers);
  hash_free (tm->trace_index_by_offset);
}

void
mheap_trace (void *v, int enable)
{
  mheap_t *h;

  h = mheap_header (v);

  if (enable)
    {
      h->flags |= MHEAP_FLAG_TRACE;
    }
  else
    {
      mheap_trace_main_free (&h->trace_main);
      h->flags &= ~MHEAP_FLAG_TRACE;
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */