/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

/* Per-CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  /* numa_id is unsigned, so only the upper bound needs checking. */
  ASSERT (numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
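
/* Usage sketch (illustrative, not part of the API): call once at
   worker-thread start-up, before the thread's first allocation.
   `my_thread_fn` is a hypothetical pthread start routine:

     static void *
     my_thread_fn (void *arg)
     {
       clib_mem_set_thread_index ();   // claim a per-thread heap slot
       ...
     }
*/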

always_inline uword
clib_mem_size_nocheck (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  return mheap_elt_data_bytes (e);
#else
  return mspace_usable_size_with_delta (p);
#endif
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;
  if (PREDICT_TRUE (offset != ~0))
    p = heap + offset;
  else
    p = 0;			/* fall through to the failure path below */
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
#endif /* USE_DLMALLOC */

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
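
/* Usage sketch (illustrative): place a 256-byte object so that the
   field at byte offset 8 lands on a 64-byte boundary, returning 0
   rather than dying on failure:

     void *p = clib_mem_alloc_aligned_at_offset (256, 64, 8, 0);
     if (p == 0)
       ...handle allocation failure...
*/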

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
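
/* Usage sketch (illustrative): the _or_null variants return 0 instead
   of invoking os_out_of_memory(), so the caller must check the result:

     u8 *buf = clib_mem_alloc_aligned_or_null (1 << 20, 64);
     if (buf == 0)
       return clib_error_return (0, "out of memory");
*/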

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
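
/* Usage sketch (illustrative; `my_obj_t` is a hypothetical type): the
   no_fail variants panic on failure, so the result needs no check:

     my_obj_t *o = clib_mem_alloc_no_fail (sizeof (my_obj_t));
*/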

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
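
/* Usage sketch (illustrative): since realloc is emulated by
   alloc+copy+free, the caller must supply the old size itself:

     buf = clib_mem_realloc (buf, new_len, old_len);
*/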

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
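
/* Usage sketch (illustrative): prefer clib_mem_free_s for buffers that
   held secrets; it zeroes the memory before freeing so the contents do
   not linger in the heap. `key_material` is hypothetical:

     clib_mem_free_s (key_material);
*/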

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
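
/* Usage sketch (illustrative): push/pop another heap around a group of
   allocations. Assumes `other_heap` came from e.g. clib_mem_init():

     void *old = clib_mem_set_heap (other_heap);
     void *p = clib_mem_alloc (128);	// allocated from other_heap
     clib_mem_set_heap (old);		// restore the original heap
*/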

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
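
/* Usage sketch (illustrative, assuming vppinfra's fformat and
   format_memory_size helpers):

     clib_mem_usage_t u;
     clib_mem_usage (&u);
     fformat (stdout, "heap: %U used of %U\n",
	      format_memory_size, u.bytes_used,
	      format_memory_size, u.bytes_total);
*/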

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
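
/* Usage sketch (illustrative): reserve and later release 1 GiB of
   anonymous virtual address space:

     uword sz = 1ULL << 30;
     void *va = clib_mem_vm_alloc (sz);
     if (va == 0)
       ...handle failure...
     clib_mem_vm_free (va, sz);
*/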

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
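
/* Usage sketch (illustrative): drop the pages backing a reserved
   region without giving up the address range, then re-commit it later
   at the same address:

     clib_mem_vm_unmap (va, sz);	// pages released, range still held
     clib_mem_vm_map (va, sz);		// fresh zero-filled pages at va
*/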

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags;		/**< vm allocation flags:
			   <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
			   descriptor will be provided on successful allocation.
			   <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
			   <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
			   numa node preference.
			   <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
			   <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
			   number of available pages is not sufficient.
			   <br> CLIB_MEM_VM_F_LOCKED: request locked memory. */
  char *name;		/**< Name for memory allocation, set by caller. */
  uword size;		/**< Allocation size, set by caller. */
  int numa_node;	/**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr;		/**< Pointer to allocated memory, set on successful allocation. */
  int fd;		/**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;	/**< Page size in log2 format, set on successful allocation. */
  int n_pages;		/**< Number of pages. */
  uword requested_va;	/**< Request fixed position mapping. */
} clib_mem_vm_alloc_t;
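
/* Usage sketch (illustrative): request 2 MB of shared, hugepage-backed
   memory preferring numa node 0. `my-region` is a hypothetical name;
   clib_mem_vm_ext_alloc is declared just below:

     clib_mem_vm_alloc_t alloc = { 0 };
     clib_error_t *err;

     alloc.name = "my-region";
     alloc.size = 2 << 20;
     alloc.numa_node = 0;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB
       | CLIB_MEM_VM_F_NUMA_PREFER;
     if ((err = clib_mem_vm_ext_alloc (&alloc)))
       clib_error_report (err);
     // on success alloc.addr, alloc.fd and alloc.log2_page_size are set
*/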

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);

typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
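
/* Usage sketch (illustrative): map a shared-memory fd exported by
   another process (e.g. one created via clib_mem_vm_ext_alloc).
   `shared_fd` and `shared_size` are hypothetical:

     clib_mem_vm_map_t map = { 0 };
     map.fd = shared_fd;
     map.size = shared_size;
     if (clib_mem_vm_ext_map (&map) == 0)
       ...use map.addr...
*/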
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */