/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)

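/* Page sizes are expressed as log2 of the size in bytes, so each enum
   value doubles as a shift amount: e.g. CLIB_MEM_PAGE_SZ_4K is 12
   (1 << 12 = 4 KiB) and CLIB_MEM_PAGE_SZ_2M is 21 (1 << 21 = 2 MiB).
   The first three values are sentinels, not sizes. */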
typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  size_t mspace_usable_size_with_delta (const void *p);
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;
  void *mspace_get_aligned (void *msp, unsigned long n_user_data_bytes,
			    unsigned long align, unsigned long align_offset);

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
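
/*
 * Example (illustrative): the offset form exists so a caller can align an
 * interior address rather than the returned pointer itself. Allocating
 * with align = 64 and align_offset = sizeof (header_t) (a hypothetical
 * header type) places the block so that (p + align_offset), the payload
 * just past the header, is 64-byte aligned; vppinfra vectors use this to
 * align user data after the vector header.
 */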

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocators which return 0 instead of calling
   os_out_of_memory() when they fail */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
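
/*
 * Example (illustrative): the _or_null variants leave failure handling
 * to the caller; clib_error_return comes from <vppinfra/error.h>.
 *
 *   u8 *buf = clib_mem_alloc_or_null (len);
 *   if (buf == 0)
 *     return clib_error_return (0, "out of memory");
 */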

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();
  int mspace_is_heap_object (void *msp, void *p);

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  void mspace_put (void *msp, void *p_arg);
  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
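
/*
 * Example (illustrative): temporarily allocate from a different heap,
 * then restore the thread's original heap.
 *
 *   void *old_heap = clib_mem_set_heap (other_heap);
 *   void *p = clib_mem_alloc (128);	// allocated from other_heap
 *   clib_mem_set_heap (old_heap);	// restore this thread's heap
 */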

void clib_mem_destroy_heap (void *heap);
void *clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt,
			    ...);

void clib_mem_main_init ();
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_get_heap_usage (void *heap, clib_mem_usage_t * usage);

void *clib_mem_get_heap_base (void *heap);
uword clib_mem_get_heap_size (void *heap);
uword clib_mem_get_heap_free_space (void *heap);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
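
/*
 * Example (illustrative): reserve and release 1 MiB of anonymous memory.
 * clib_mem_vm_alloc returns 0 on failure; clib_mem_vm_free is declared
 * just below.
 *
 *   void *va = clib_mem_vm_alloc (1 << 20);
 *   if (va == 0)
 *     return clib_error_return (0, "mmap failed");
 *   ...
 *   clib_mem_vm_free (va, 1 << 20);
 */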

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);
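
/*
 * Example (illustrative; assumes the map functions return
 * CLIB_MEM_VM_MAP_FAILED on failure, per the macro above): map a shared,
 * file-backed segment by fd, then unmap it.
 *
 *   void *base = clib_mem_vm_map_shared (0, size, fd, 0, "%s-seg", name);
 *   if (base == CLIB_MEM_VM_MAP_FAILED)
 *     return CLIB_MEM_ERROR;
 *   ...
 *   clib_mem_vm_unmap (base);
 */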

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags;			/**< vm allocation flags:
				   <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
				   descriptor will be provided on successful allocation.
				   <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
				   <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
				   numa node preference.
				   <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
				   <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
				   number of available pages is not sufficient.
				   <br> CLIB_MEM_VM_F_LOCKED: request locked memory. */
  char *name;			/**< Name for memory allocation, set by caller. */
  uword size;			/**< Allocation size, set by caller. */
  int numa_node;		/**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr;			/**< Pointer to allocated memory, set on successful allocation. */
  int fd;			/**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;		/**< Page size in log2 format, set on successful allocation. */
  int n_pages;			/**< Number of pages. */
  uword requested_va;		/**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
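
/*
 * Example (illustrative): request two shared 2M hugepages via the
 * extended allocator; clib_error_report comes from <vppinfra/error.h>
 * and "my-segment" is a hypothetical name.
 *
 *   clib_mem_vm_alloc_t alloc = { 0 };
 *   alloc.name = "my-segment";
 *   alloc.size = 2 << 21;
 *   alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
 *   clib_error_t *err = clib_mem_vm_ext_alloc (&alloc);
 *   if (err)
 *     clib_error_report (err);
 *   // on success, alloc.addr, alloc.fd, alloc.log2_page_size and
 *   // alloc.n_pages have been filled in
 */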

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();

typedef struct
{
  uword size;			/**< Map size */
  int fd;			/**< File descriptor to be mapped */
  uword requested_va;		/**< Request fixed position mapping */
  void *addr;			/**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
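
/*
 * Example (illustrative):
 *   clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K) returns 8192,
 *   i.e. 5000 rounded up to the next multiple of the 4K page size.
 */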

typedef struct
{
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
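
/*
 * Example (illustrative): walk every available numa node, starting the
 * scan with -1; the function returns -1 once the bitmap is exhausted.
 *
 *   for (int n = vlib_mem_get_next_numa_node (-1); n >= 0;
 *	  n = vlib_mem_get_next_numa_node (n))
 *     ...
 */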

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  /* 64-bit shift: log2 values above 31 (e.g. CLIB_MEM_PAGE_SZ_16G = 34)
     would overflow a 32-bit int shift */
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */