/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
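
/* Note: the enum values above are the log2 of the page size in bytes,
   e.g. 1ULL << CLIB_MEM_PAGE_SZ_2M is 2 MB.  CLIB_MEM_PAGE_SZ_DEFAULT and
   CLIB_MEM_PAGE_SZ_DEFAULT_HUGE presumably select the system default normal
   and huge page sizes rather than encoding a literal log2 value. */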

typedef struct
{
  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which does not call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
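
/* Usage sketch (illustrative only; my_thing_t and the error path are
   hypothetical): the _or_null variants return 0 on failure instead of
   calling os_out_of_memory (), so callers can handle allocation failure
   themselves:

     my_thing_t *t =
       clib_mem_alloc_aligned_or_null (sizeof (*t), CLIB_CACHE_LINE_BYTES);
     if (t == 0)
       return clib_error_return (0, "allocation failure");
     ...
     clib_mem_free (t);
 */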

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
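
/* Typical heap push/pop pattern (sketch): clib_mem_set_heap () returns the
   previously active per-thread heap, so a caller can temporarily allocate
   from another heap and then restore the original one:

     void *oldheap = clib_mem_set_heap (other_heap);
     p = clib_mem_alloc (size);
     clib_mem_set_heap (oldheap);

   Here other_heap stands for whatever mspace the caller obtained elsewhere
   (e.g. from clib_mem_init ()). */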

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags;		/**< vm allocation flags:
			   <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
			   descriptor will be provided on successful allocation.
			   <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
			   <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
			   numa node preference.
			   <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
			   <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
			   number of available pages is not sufficient.
			   <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
			 */
  char *name;		/**< Name for memory allocation, set by caller. */
  uword size;		/**< Allocation size, set by caller. */
  int numa_node;	/**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr;		/**< Pointer to allocated memory, set on successful allocation. */
  int fd;		/**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;	/**< Page size in log2 format, set on successful allocation. */
  int n_pages;		/**< Number of pages. */
  uword requested_va;	/**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
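
/* Sketch of a clib_mem_vm_ext_alloc () call based on the field descriptions
   above (the region name and the 64 MB size are arbitrary examples):

     clib_mem_vm_alloc_t alloc = { 0 };
     clib_error_t *err;

     alloc.name = "my-region";
     alloc.size = 64 << 20;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     if ((err = clib_mem_vm_ext_alloc (&alloc)))
       clib_error_report (err);

   On success alloc.addr, alloc.fd, alloc.log2_page_size and alloc.n_pages
   are populated; the region is released with clib_mem_vm_ext_free (&alloc). */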

typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
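
/* Sketch: mapping an existing file descriptor (for example one returned by
   clib_mem_create_hugetlb_fd () above) with clib_mem_vm_ext_map ();
   fd and map_size come from the caller and use_mapping () is a stand-in
   for the caller's own code:

     clib_mem_vm_map_t map = { 0 };
     map.fd = fd;
     map.size = map_size;
     if (clib_mem_vm_ext_map (&map) == 0)
       use_mapping (map.addr);

   A zero (NULL clib_error_t *) return indicates success. */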
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */