/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
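
/* Enum members encode the log2 page size, so the size in bytes is simply
   1ULL << value. A few illustrative values:

     1ULL << CLIB_MEM_PAGE_SZ_4K == 4096
     1ULL << CLIB_MEM_PAGE_SZ_2M == 2097152
     1ULL << CLIB_MEM_PAGE_SZ_1G == 1073741824

   CLIB_MEM_PAGE_SZ_DEFAULT and CLIB_MEM_PAGE_SZ_DEFAULT_HUGE are resolved to
   the actual system page sizes by clib_mem_log2_page_size_validate () below. */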

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
                                       0, clib_mem_main.per_cpu_mheaps[0]))
      {
        os_set_thread_index (i);
        break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
                                  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
        align_offset %= align;
      else
        align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_mem_main.per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
        os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
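
/* Usage sketch (illustrative values): allocate 128 bytes from the per-cpu
   heap so that the address align_offset (4) bytes past the returned pointer
   is 64-byte aligned, calling os_out_of_memory () on failure:

     void *p = clib_mem_alloc_aligned_at_offset (128, 64, 4, 1);
*/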

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
                                           /* align_offset */ 0,
                                           /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
                                           /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 instead of calling os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
                                           /* align_offset */ 0,
                                           /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
                                           /* os_out_of_memory */ 0);
}

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})
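
/* Example (sketch): a small, cache-line-aligned allocation where failure is
   treated as fatal; on a NULL result the macro calls clib_panic ():

     u8 *buf = clib_mem_alloc_aligned_no_fail (1024, 64);
*/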

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
        copy_size = old_size;
      else
        copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
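
/* Sketch of typical use: unlike realloc(3) the caller supplies the old size,
   since the copy length is the smaller of old_size and new_size:

     p = clib_mem_realloc (p, 2 * old_size, old_size);
*/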

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
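
/* Common push/pop pattern (sketch): temporarily switch the calling thread to
   another heap, allocate from it, then restore the previous heap.
   'other_heap' is a hypothetical heap pointer obtained elsewhere, e.g. from
   clib_mem_init ():

     void *oldheap = clib_mem_set_heap (other_heap);
     void *obj = clib_mem_alloc (128);
     clib_mem_set_heap (oldheap);
*/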

void clib_mem_main_init ();
void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
                                    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
                                      u8 numa);

void clib_mem_exit (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
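
/* Sketch: reserve a megabyte of anonymous virtual memory and release it
   again; size is in bytes and is normally a multiple of the page size:

     void *va = clib_mem_vm_alloc (1 << 20);
     if (va)
       clib_mem_vm_free (va, 1 << 20);
*/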

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
                                uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
                       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
                             char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
                              char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
                                                      hdr);

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;		/* Page size in log2 format, set on successful allocation. */
  int n_pages;			/* Number of pages. */
  uword requested_va;		/**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
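
/* Usage sketch for clib_mem_vm_ext_alloc (), declared below; the name, size
   and flag values here are purely illustrative:

     clib_mem_vm_alloc_t alloc = { 0 };
     alloc.name = "example";
     alloc.size = 32 << 20;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     if (clib_mem_vm_ext_alloc (&alloc) == 0)
       {
         ... use alloc.addr (and alloc.fd for shared memory) ...
         clib_mem_vm_ext_free (&alloc);
       }
*/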

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
uword clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
                           clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
                            int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();

typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
  u8 numa_node;
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va,
                               clib_mem_page_sz_t log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
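
/* Worked example: on a system with 4K default pages, a 5000 byte request
   rounds up to the next page boundary:

     clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_DEFAULT) == 8192
     clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_2M) == 2097152
*/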

typedef struct
{
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
                              uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
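
/* Sketch: iterate over all numa nodes present in the bitmap by starting from
   -1 and stopping when -1 is returned:

     for (int n = vlib_mem_get_next_numa_node (-1); n >= 0;
          n = vlib_mem_get_next_numa_node (n))
       {
         ...
       }
*/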

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */