/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
37
38#ifndef _included_clib_mem_h
39#define _included_clib_mem_h
40
41#include <stdarg.h>
Damjan Marion01914ce2017-09-14 19:04:50 +020042#include <unistd.h>
43#include <sys/mman.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070044
Dave Barachc3799992016-08-15 11:12:27 -040045#include <vppinfra/clib.h> /* uword, etc */
Damjan Marion01914ce2017-09-14 19:04:50 +020046#include <vppinfra/clib_error.h>
Dave Barach6a5adc32018-07-04 10:56:23 -040047
Dave Barach6a5adc32018-07-04 10:56:23 -040048#include <vppinfra/dlmalloc.h>
Dave Barach6a5adc32018-07-04 10:56:23 -040049
Ed Warnickecb9cada2015-12-08 15:45:58 -070050#include <vppinfra/os.h>
Dave Barachb7b92992018-10-17 10:38:51 -040051#include <vppinfra/string.h> /* memcpy, clib_memset */
Benoît Ganne9fb6d402019-04-15 15:28:21 +020052#include <vppinfra/sanitizer.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070053
Damjan Marionce8debf2016-02-06 19:16:21 +010054#define CLIB_MAX_MHEAPS 256
Dave Baracha690fdb2020-01-21 12:34:55 -050055#define CLIB_MAX_NUMAS 8
56
Damjan Marionb5095042020-09-11 22:13:46 +020057typedef enum
58{
59 CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
60 CLIB_MEM_PAGE_SZ_DEFAULT = 1,
61 CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
62 CLIB_MEM_PAGE_SZ_4K = 12,
63 CLIB_MEM_PAGE_SZ_16K = 14,
64 CLIB_MEM_PAGE_SZ_64K = 16,
65 CLIB_MEM_PAGE_SZ_1M = 20,
66 CLIB_MEM_PAGE_SZ_2M = 21,
67 CLIB_MEM_PAGE_SZ_16M = 24,
68 CLIB_MEM_PAGE_SZ_32M = 25,
69 CLIB_MEM_PAGE_SZ_512M = 29,
70 CLIB_MEM_PAGE_SZ_1G = 30,
71 CLIB_MEM_PAGE_SZ_16G = 34,
72} clib_mem_page_sz_t;
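
/* Apart from the UNKNOWN / DEFAULT placeholders, the enum values above are
   the log2 of the page size in bytes, so converting to a byte count is a
   shift.  Illustrative sketch only:

     uword page_bytes = 1ULL << CLIB_MEM_PAGE_SZ_2M;	(2 MB)
*/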

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
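
/* Usage note (illustrative sketch): the align/align_offset pair is typically
   used to place a header in front of aligned user data, i.e. the returned
   pointer p is chosen so that (p + align_offset) falls on an 'align'
   boundary.  The names below are hypothetical:

     u8 *p = clib_mem_alloc_aligned_at_offset (hdr_bytes + data_bytes,
					       CLIB_CACHE_LINE_BYTES,
					       hdr_bytes, 1);
     u8 *data = p + hdr_bytes;	(cache-line aligned)
*/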

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 instead of calling os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
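
/* Usage example (illustrative only):

     void *p = clib_mem_alloc (128);		     dies via os_out_of_memory() on failure
     void *q = clib_mem_alloc_or_null (1ULL << 30);  returns 0 on failure
     if (q == 0)
       handle_allocation_failure ();		     hypothetical error handler
*/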

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
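
/* Usage example (illustrative): unlike realloc(3), the caller supplies the
   previous size, since the copy length is min (old_size, new_size):

     u8 *buf = clib_mem_alloc (64);
     ...
     buf = clib_mem_realloc (buf, 128, 64);	(keeps the first 64 bytes)
*/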

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
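
/* Typical push/pop pattern for allocating from a specific heap
   (illustrative sketch; 'other_heap' stands for a heap created elsewhere,
   e.g. by clib_mem_init):

     void *save = clib_mem_set_heap (other_heap);
     void *obj = clib_mem_alloc (obj_size);
     clib_mem_set_heap (save);
*/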

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
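
/* Example (sketch) of reading statistics for the current heap; fformat and
   format_memory_size come from other vppinfra headers and are assumed here
   purely for illustration:

     clib_mem_usage_t usage;
     clib_mem_usage (&usage);
     fformat (stdout, "heap: %U used of %U\n",
	      format_memory_size, usage.bytes_used,
	      format_memory_size, usage.bytes_total);
*/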

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
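
/* Example (sketch): grab anonymous address space, then release it.
   clib_mem_vm_alloc returns 0 on failure, so check before use:

     uword sz = 16 << 20;			(16 MB)
     void *va = clib_mem_vm_alloc (sz);
     if (va)
       clib_mem_vm_free (va, sz);
*/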

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                  <br> CLIB_MEM_VM_F_SHARED: request shared memory, a file
                  descriptor will be provided on successful allocation.
                  <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                  <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains a
                  valid numa node preference.
                  <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                  <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                  the number of available pages is not sufficient.
                  <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
              */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
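
/* Example (sketch) of a shared, hugepage-backed allocation via the extended
   allocator; the name and size are hypothetical and error handling is
   abbreviated:

     clib_mem_vm_alloc_t alloc = { 0 };
     clib_error_t *err;

     alloc.name = "my-region";
     alloc.size = 32 << 20;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;

     if ((err = clib_mem_vm_ext_alloc (&alloc)))
       clib_error_report (err);
     else
       clib_mem_vm_ext_free (&alloc);	(alloc.addr and alloc.fd valid on success)
*/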

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
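
/* Example (sketch): mapping an already-created file descriptor, e.g. one
   obtained from clib_mem_create_hugetlb_fd; 'fd' and the chosen size are
   hypothetical:

     clib_mem_vm_map_t map = { 0 };
     map.fd = fd;
     map.size = clib_mem_get_fd_page_size (fd);
     if (clib_mem_vm_ext_map (&map) == 0)
       ... use map.addr ...
*/
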
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */