/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
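
/*
 * Each non-sentinel enumerator above is the log2 of the page size in
 * bytes (UNKNOWN, DEFAULT and DEFAULT_HUGE are sentinels resolved at
 * runtime), so a shift recovers the byte count. Illustrative:
 *
 *   uword bytes = 1ULL << CLIB_MEM_PAGE_SZ_2M;	// 2097152
 */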

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

#define foreach_clib_mem_heap_flag \
  _(0, LOCKED, "locked") \
  _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;
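
/*
 * The x-macro above expands to CLIB_MEM_HEAP_F_LOCKED = (1 << 0) and
 * CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY = (1 << 1). Flags are tested with a
 * plain bitwise AND, e.g. (illustrative):
 *
 *   if (h->flags & CLIB_MEM_HEAP_F_LOCKED)
 *     ;	// heap memory was requested locked
 */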

typedef struct
{
  /* base address */
  void *base;

  /* dlmalloc mspace */
  void *mspace;

  /* heap size */
  uword size;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz:8;

  /* flags */
  clib_mem_heap_flag_t flags:8;

  /* name - _MUST_ be last */
  char name[0];
} clib_mem_heap_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_sys_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* map lock */
  u8 map_lock;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline clib_mem_heap_t *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (void *new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (void *new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  size_t mspace_usable_size_with_delta (const void *p);
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
			    unsigned long align, unsigned long align_offset);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
  void *p;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  p = mspace_get_aligned (h->mspace, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
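
/*
 * Note on align_offset: the allocator aligns (p + align_offset) rather
 * than p itself, so a member at a known byte offset inside the object
 * can land on an aligned boundary. Illustrative sketch (obj_t and its
 * 8-byte header are hypothetical):
 *
 *   // align the data that starts 8 bytes into the object to 64 bytes
 *   obj_t *o =
 *     clib_mem_alloc_aligned_at_offset (sizeof (obj_t), 64, 8, 1);
 */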

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 when it fails, instead of calling
   os_out_of_memory() */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
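
/*
 * Illustrative failure-handling sketch for the _or_null variants: the
 * caller handles the allocation failure instead of the process being
 * terminated through os_out_of_memory ():
 *
 *   u8 *buf = clib_mem_alloc_or_null (65536);
 *   if (buf == 0)
 *     return clib_error_return (0, "allocation failed");
 */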

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  int mspace_is_heap_object (void *msp, void *p);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
  return mspace_is_heap_object (h->mspace, p);
}

always_inline void
clib_mem_free (void *p)
{
  void mspace_put (void *msp, void *p_arg);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (h->mspace, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
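
/*
 * Unlike realloc(3), the caller passes old_size explicitly; the
 * emulation copies min (old_size, new_size) bytes and frees the old
 * object. Illustrative use, growing a buffer from n to 2 * n bytes:
 *
 *   p = clib_mem_realloc (p, 2 * n, n);
 */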

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
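
/*
 * clib_mem_free_s is meant for objects holding sensitive data (keys,
 * credentials): the object is zeroed before being returned to the
 * heap. Illustrative sketch:
 *
 *   u8 *key = clib_mem_alloc (32);
 *   ...
 *   clib_mem_free_s (key);	// zeroes the object, then frees it
 */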

always_inline clib_mem_heap_t *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline clib_mem_heap_t *
clib_mem_set_heap (clib_mem_heap_t * heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
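
/*
 * Common push/pop idiom (illustrative; my_heap is a hypothetical heap
 * created elsewhere): temporarily redirect this thread's allocations
 * to another heap, then restore the previous one.
 *
 *   clib_mem_heap_t *old = clib_mem_set_heap (my_heap);
 *   void *p = clib_mem_alloc (128);	// served from my_heap
 *   clib_mem_set_heap (old);
 */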

void clib_mem_destroy_heap (clib_mem_heap_t * heap);
clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
				       char *fmt, ...);
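
/*
 * Sketch (size and name illustrative): create a 64 MB heap, letting
 * the allocator pick the base address, then destroy it.
 *
 *   clib_mem_heap_t *h =
 *     clib_mem_create_heap (0, 64 << 20, 0, "test %s", "heap");  // is_locked = 0
 *   ...
 *   clib_mem_destroy_heap (h);
 */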

void clib_mem_main_init ();
void *clib_mem_init (void *base, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
			      clib_mem_usage_t * usage);

void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);
u8 *format_clib_mem_page_stats (u8 * s, va_list * va);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
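
/*
 * Illustrative pairing: mmap returns MAP_FAILED ((void *) -1) on
 * error, which clib_mem_vm_alloc converts to 0.
 *
 *   void *va = clib_mem_vm_alloc (1 << 20);	// 1 MB
 *   if (va)
 *     clib_mem_vm_free (va, 1 << 20);
 */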

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);
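
/*
 * Sketch (size and name illustrative): map 32 MB backed by 2 MB pages,
 * then walk the registered map headers; passing 0 is assumed to yield
 * the first header.
 *
 *   void *p = clib_mem_vm_map (0, 32 << 20, CLIB_MEM_PAGE_SZ_2M,
 *                              "%s", "my-map");
 *   if (p == CLIB_MEM_VM_MAP_FAILED)
 *     ...
 *
 *   clib_mem_vm_map_hdr_t *hdr = 0;
 *   while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
 *     ...	// hdr->name, hdr->num_pages, hdr->log2_page_sz
 */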

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline void
clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

static_always_inline uword
clib_mem_get_default_hugepage_size (void)
{
  return 1ULL << clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (clib_mem_heap_t * v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
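
/*
 * Worked example: rounding 5000 bytes up to a 4K page boundary gives
 * round_pow2 (5000, 4096) = 8192.
 *
 *   uword sz = clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K);
 */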

typedef struct
{
  clib_mem_page_sz_t log2_page_sz;
  uword total;
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
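
/*
 * Illustrative iteration over all available NUMA nodes: start at -1
 * and advance until the function returns -1.
 *
 *   int numa = -1;
 *   while ((numa = vlib_mem_get_next_numa_node (numa)) >= 0)
 *     ...
 */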

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

/* bulk allocator */

typedef void *clib_mem_bulk_handle_t;
clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
					   u32 min_elts_per_chunk);
void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
u8 *format_clib_mem_bulk (u8 *s, va_list *args);
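
/*
 * Typical bulk allocator lifecycle (element size, alignment and chunk
 * size illustrative): fixed-size elements are carved out of larger
 * chunks, which keeps per-element overhead low.
 *
 *   clib_mem_bulk_handle_t h =
 *     clib_mem_bulk_init (64, CLIB_CACHE_LINE_BYTES, 32);
 *   void *e = clib_mem_bulk_alloc (h);
 *   ...
 *   clib_mem_bulk_free (h, e);
 *   clib_mem_bulk_destroy (h);
 */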

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */