/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id >= 0 && numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  return mheap_elt_data_bytes (e);
#else
  return mspace_usable_size_with_delta (p);
#endif
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;
  if (PREDICT_TRUE (offset != ~0))
    p = heap + offset;
  else
    p = 0;			/* allocation failed; don't leave p uninitialized */
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
#endif /* USE_DLMALLOC */

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocators which return 0 instead of calling os_out_of_memory()
   when they fail */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
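
/*
 * Usage sketch (editor's example, not part of the original API): allocate
 * from the calling thread's heap, check the usable size, and free.  The
 * *_or_null variants return 0 on failure instead of invoking
 * os_out_of_memory(), so the result must be checked.
 *
 *   u8 *p = clib_mem_alloc_aligned_or_null (512, 64);
 *   if (p)
 *     {
 *       ASSERT (clib_mem_size (p) >= 512);
 *       clib_mem_free (p);
 *     }
 */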

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
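
/*
 * Usage sketch (editor's example): unlike libc realloc(), the caller must
 * supply the old allocation size, since reallocation is emulated with
 * alloc + copy + free.
 *
 *   u8 *buf = clib_mem_alloc (64);
 *   buf = clib_mem_realloc (buf, 128, 64);
 *   clib_mem_free (buf);
 */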

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
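
/*
 * Usage sketch (editor's example): temporarily switch the calling thread to
 * another heap, allocate from it, then restore the previous heap.
 * "other_heap" is assumed to have been created earlier, e.g. with
 * clib_mem_init().
 *
 *   void *old = clib_mem_set_heap (other_heap);
 *   u8 *obj = clib_mem_alloc (256);
 *   clib_mem_free (obj);
 *   clib_mem_set_heap (old);
 */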

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
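
/*
 * Usage sketch (editor's example): collect statistics for the current heap.
 * Per the struct comment above, used + free should equal total.
 *
 *   clib_mem_usage_t usage;
 *   clib_mem_usage (&usage);
 *   ASSERT (usage.bytes_used + usage.bytes_free == usage.bytes_total);
 */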

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
  CLIB_MEM_POISON (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
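
/*
 * Usage sketch (editor's example): reserve an anonymous read/write region,
 * hand the pages back to the kernel while keeping the address range
 * (vm_unmap remaps it PROT_NONE), make it usable again, then release it.
 *
 *   uword size = 1 << 20;
 *   void *base = clib_mem_vm_alloc (size);
 *   if (base)
 *     {
 *       clib_mem_vm_unmap (base, size);
 *       clib_mem_vm_map (base, size);
 *       clib_mem_vm_free (base, size);
 *     }
 */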

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
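
/*
 * Usage sketch (editor's example): request a shared, hugepage-backed region
 * with a NUMA preference via clib_mem_vm_ext_alloc().  On success addr, fd
 * and log2_page_size are filled in; on failure a clib_error_t is returned.
 * The size and node number below are illustrative only.
 *
 *   clib_mem_vm_alloc_t alloc = { 0 };
 *   clib_error_t *err;
 *
 *   alloc.name = "example-region";
 *   alloc.size = 32 << 20;
 *   alloc.numa_node = 1;
 *   alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB |
 *     CLIB_MEM_VM_F_NUMA_PREFER;
 *   if ((err = clib_mem_vm_ext_alloc (&alloc)))
 *     clib_error_report (err);
 *   else
 *     clib_mem_vm_ext_free (&alloc);
 */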

typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */