/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
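
/* Example (a hedged sketch, not part of the original header): values >= 12
   encode the page size as log2 (bytes), so CLIB_MEM_PAGE_SZ_2M == 21
   because 1 << 21 == 2 MiB. Converting an enum value to bytes is a shift;
   see clib_mem_page_bytes () below for the variant that also resolves the
   DEFAULT / DEFAULT_HUGE placeholders:

     uword bytes = 1ULL << CLIB_MEM_PAGE_SZ_2M;   // 2097152
*/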

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
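
/* Example (a hedged sketch, not part of the original header): the _or_null
   variants hand failure back to the caller instead of invoking
   os_out_of_memory (), so the result must be checked; the size and
   alignment here are illustrative:

     u8 *buf = clib_mem_alloc_aligned_or_null (1 << 16, CLIB_CACHE_LINE_BYTES);
     if (buf == 0)
       return clib_error_return (0, "out of memory");
*/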

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
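
/* Example (a hedged sketch, not part of the original header): the usual
   push/pop pattern for allocating from a specific heap and then restoring
   the caller's heap; `tmp_heap' is a hypothetical heap pointer:

     void *oldheap = clib_mem_set_heap (tmp_heap);
     void *p = clib_mem_alloc (128);
     clib_mem_set_heap (oldheap);
*/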

void clib_mem_main_init ();
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
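
/* Example (a hedged sketch, not part of the original header): a paired
   allocate/release of anonymous virtual memory; the size is illustrative.
   Note that clib_mem_vm_alloc () returns 0 (not MAP_FAILED) on failure:

     uword size = 64 << 20;
     void *base = clib_mem_vm_alloc (size);
     if (base)
       clib_mem_vm_free (base, size);
*/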

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
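
/* Example (a hedged sketch, not part of the original header): rounding an
   odd size up to the next page boundary. With 4K pages,

     clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K)

   yields 8192, since round_pow2 () rounds up to the nearest multiple of
   1 << 12. */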

typedef struct
{
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
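
/* Example (a hedged sketch, not part of the original header): iterating
   every NUMA node present in the bitmap by starting from -1;
   do_something_per_numa () is a hypothetical callback:

     int numa = -1;
     while ((numa = vlib_mem_get_next_numa_node (numa)) != -1)
       do_something_per_numa (numa);
*/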

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  /* Use a 64-bit shift: page sizes of 4 GiB and above
     (e.g. CLIB_MEM_PAGE_SZ_16G == 34) would overflow a 32-bit int shift. */
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */