/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
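
/*
 * Note: the values above are log2 page sizes, so the size in bytes is
 * 1ULL << value; CLIB_MEM_PAGE_SZ_DEFAULT and _DEFAULT_HUGE are placeholders
 * resolved from clib_mem_main at runtime. Illustrative sketch (not part of
 * the original header), using clib_mem_page_bytes () defined further below:
 *
 *   uword bytes_2m = 1ULL << CLIB_MEM_PAGE_SZ_2M;	// 2 MiB
 *   uword bytes_1g = 1ULL << CLIB_MEM_PAGE_SZ_1G;	// 1 GiB
 *   uword bytes_sys = clib_mem_page_bytes (CLIB_MEM_PAGE_SZ_DEFAULT);
 */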

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

#define foreach_clib_mem_heap_flag \
  _(0, LOCKED, "locked") \
  _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;
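
/*
 * The foreach_clib_mem_heap_flag x-macro above expands, with the _()
 * definition used in the enum, to:
 *
 *   CLIB_MEM_HEAP_F_LOCKED = (1 << 0),
 *   CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY = (1 << 1),
 *
 * The string argument is unused here; it is available to other users of the
 * macro (e.g. flag formatters) that supply their own _() definition.
 */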

typedef struct
{
  /* base address */
  void *base;

  /* dlmalloc mspace */
  void *mspace;

  /* heap size */
  uword size;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz:8;

  /* flags */
  clib_mem_heap_flag_t flags:8;

  /* name - _MUST_ be last */
  char name[0];
} clib_mem_heap_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* map lock */
  u8 map_lock;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline clib_mem_heap_t *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (void *new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (void *new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  size_t mspace_usable_size_with_delta (const void *p);
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
			    unsigned long align, unsigned long align_offset);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
  void *p;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  p = mspace_get_aligned (h->mspace, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocators which return 0 instead of calling
   os_out_of_memory() when they fail */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
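
/*
 * Usage sketch (illustrative, not part of the original header): the _or_null
 * variants return 0 on failure, the plain variants call os_out_of_memory ().
 * CLIB_CACHE_LINE_BYTES and clib_error_return come from other vppinfra
 * headers and are used here only for illustration.
 *
 *   u8 *buf = clib_mem_alloc_aligned_or_null (1024, CLIB_CACHE_LINE_BYTES);
 *   if (buf == 0)
 *     return clib_error_return (0, "allocation failed");
 *   ...
 *   clib_mem_free (buf);
 */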

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  int mspace_is_heap_object (void *msp, void *p);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();
  return mspace_is_heap_object (h->mspace, p);
}

always_inline void
clib_mem_free (void *p)
{
  void mspace_put (void *msp, void *p_arg);
  clib_mem_heap_t *h = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (h->mspace, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline clib_mem_heap_t *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline clib_mem_heap_t *
clib_mem_set_heap (clib_mem_heap_t * heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}

void clib_mem_destroy_heap (clib_mem_heap_t * heap);
clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
				       char *fmt, ...);
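
/*
 * Typical push/pop pattern (illustrative sketch): create a private heap,
 * make it the calling thread's allocation heap, then restore the previous
 * heap. The size and name here are arbitrary examples.
 *
 *   clib_mem_heap_t *h = clib_mem_create_heap (0, 32 << 20, 1, "my heap");
 *   clib_mem_heap_t *old = clib_mem_set_heap (h);
 *   void *obj = clib_mem_alloc (128);
 *   ...
 *   clib_mem_set_heap (old);
 *   clib_mem_destroy_heap (h);
 */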

void clib_mem_main_init ();
void *clib_mem_init (void *base, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
			      clib_mem_usage_t * usage);

void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);
u8 *format_clib_mem_page_stats (u8 * s, va_list * va);
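
/*
 * Formatting sketch (illustrative): the format_* functions above plug into
 * the vppinfra format machinery. The trailing arguments shown for
 * format_clib_mem_heap (heap pointer, verbosity flag) are an assumption
 * based on typical usage, not a guarantee of this header.
 *
 *   u8 *s = format (0, "%U", format_clib_mem_heap, clib_mem_get_heap (), 1);
 *   fformat (stdout, "%v\n", s);
 *   vec_free (s);
 */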

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
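
/*
 * Illustrative sketch: clib_mem_vm_alloc () returns an anonymous private
 * mapping, or 0 on failure; release it with clib_mem_vm_free () using the
 * same size.
 *
 *   uword sz = 16 << 20;
 *   void *va = clib_mem_vm_alloc (sz);
 *   if (va == 0)
 *     return clib_error_return_unix (0, "clib_mem_vm_alloc");
 *   ...
 *   clib_mem_vm_free (va, sz);
 */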

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						      hdr);
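
/*
 * Illustrative sketch: named mappings created with the clib_mem_vm_map* ()
 * functions are tracked as clib_mem_vm_map_hdr_t records. Assuming a null
 * argument yields the first map header, they can be walked like this:
 *
 *   void *p = clib_mem_vm_map (0, 8 << 20, CLIB_MEM_PAGE_SZ_DEFAULT, "demo");
 *   if (p == CLIB_MEM_VM_MAP_FAILED)
 *     return 0;
 *
 *   clib_mem_vm_map_hdr_t *hdr = 0;
 *   while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
 *     fformat (stdout, "%s: %lu pages at 0x%lx\n", hdr->name,
 *	        hdr->num_pages, hdr->base_addr);
 *
 *   clib_mem_vm_unmap (p);
 */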

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (clib_mem_heap_t * v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
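
/*
 * Worked example: with 4K pages a 5000-byte request rounds up to 8192 bytes;
 * with 2M pages the same request rounds up to 2097152 bytes.
 *
 *   clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K);	// 8192
 *   clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_2M);	// 2097152
 */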

typedef struct
{
  clib_mem_page_sz_t log2_page_sz;
  uword total;
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
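
/*
 * Iteration sketch: passing -1 returns the lowest-numbered NUMA node present
 * in numa_node_bitmap, and -1 is returned once the bitmap is exhausted.
 * process_numa_node () is a hypothetical callback used only for illustration.
 *
 *   int n;
 *   for (n = vlib_mem_get_next_numa_node (-1); n >= 0;
 *        n = vlib_mem_get_next_numa_node (n))
 *     process_numa_node (n);
 */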

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

/* bulk allocator */

typedef void *clib_mem_bulk_handle_t;
clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
					   u32 min_elts_per_chunk);
void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
u8 *format_clib_mem_bulk (u8 *s, va_list *args);
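
/*
 * Bulk allocator usage sketch: fixed-size objects are handed out from larger
 * chunks. my_obj_t, the 64-byte alignment and the 32-element chunk minimum
 * are arbitrary illustrative choices.
 *
 *   clib_mem_bulk_handle_t h = clib_mem_bulk_init (sizeof (my_obj_t), 64, 32);
 *   my_obj_t *o = clib_mem_bulk_alloc (h);
 *   ...
 *   clib_mem_bulk_free (h, o);
 *   clib_mem_bulk_destroy (h);
 */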

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */