/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)
#define CLIB_MEM_LOG2_MIN_ALIGN (3)
#define CLIB_MEM_MIN_ALIGN (1 << CLIB_MEM_LOG2_MIN_ALIGN)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;

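/*
 * Note: each concrete enumerator above is the log2 of the page size it
 * names, so the size in bytes is recovered with a shift, e.g.:
 *
 *   1ULL << CLIB_MEM_PAGE_SZ_4K   == 4096
 *   1ULL << CLIB_MEM_PAGE_SZ_2M   == 2097152
 *
 * CLIB_MEM_PAGE_SZ_DEFAULT and CLIB_MEM_PAGE_SZ_DEFAULT_HUGE are
 * placeholders resolved to a real log2 size at runtime (see
 * clib_mem_log2_page_size_validate () below).
 */
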
typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

#define foreach_clib_mem_heap_flag                                            \
  _ (0, LOCKED, "locked")                                                     \
  _ (1, UNMAP_ON_DESTROY, "unmap-on-destroy")                                 \
  _ (2, TRACED, "traced")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;

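/*
 * The element macro above expands the flag list into:
 *
 *   CLIB_MEM_HEAP_F_LOCKED = (1 << 0),
 *   CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY = (1 << 1),
 *   CLIB_MEM_HEAP_F_TRACED = (1 << 2),
 *
 * keeping bit positions, flag names and their format strings in one place.
 */
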
typedef struct
{
  /* base address */
  void *base;

  /* dlmalloc mspace */
  void *mspace;

  /* heap size */
  uword size;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz:8;

  /* flags */
  clib_mem_heap_flag_t flags:8;

  /* name - _MUST_ be last */
  char name[0];
} clib_mem_heap_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_sys_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* map lock */
  u8 map_lock;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline clib_mem_heap_t *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (void *new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (void *new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

/* Memory allocator which calls os_out_of_memory() when it fails */
void *clib_mem_alloc (uword size);
void *clib_mem_alloc_aligned (uword size, uword align);
void *clib_mem_alloc_or_null (uword size);
void *clib_mem_alloc_aligned_or_null (uword size, uword align);
void *clib_mem_realloc (void *p, uword new_size);
void *clib_mem_realloc_aligned (void *p, uword new_size, uword align);
uword clib_mem_is_heap_object (void *p);
void clib_mem_free (void *p);

void *clib_mem_heap_alloc (void *heap, uword size);
void *clib_mem_heap_alloc_aligned (void *heap, uword size, uword align);
void *clib_mem_heap_alloc_or_null (void *heap, uword size);
void *clib_mem_heap_alloc_aligned_or_null (void *heap, uword size,
					   uword align);
void *clib_mem_heap_realloc (void *heap, void *p, uword new_size);
void *clib_mem_heap_realloc_aligned (void *heap, void *p, uword new_size,
				     uword align);
uword clib_mem_heap_is_heap_object (void *heap, void *p);
void clib_mem_heap_free (void *heap, void *p);

uword clib_mem_size (void *p);
void clib_mem_free_s (void *p);

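/*
 * Illustrative sketch: the plain allocators call os_out_of_memory () on
 * failure, while the _or_null variants return 0 and leave recovery to the
 * caller:
 *
 *   u8 *buf = clib_mem_alloc_aligned_or_null (1024, 64);
 *   if (buf == 0)
 *     return 0;			// allocation failed, recover here
 *   ...
 *   clib_mem_free (buf);
 */
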
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)                            \
({                                                                            \
  uword _clib_mem_alloc_size = (size);                                        \
  void * _clib_mem_alloc_p;                                                   \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)                                                    \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);         \
  _clib_mem_alloc_p;                                                          \
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline clib_mem_heap_t *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline clib_mem_heap_t *
clib_mem_set_heap (clib_mem_heap_t * heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}

void clib_mem_destroy_heap (clib_mem_heap_t * heap);
clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
				       char *fmt, ...);

void clib_mem_main_init ();
void *clib_mem_init (void *base, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

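/*
 * Illustrative sketch: allocating from a private heap by temporarily
 * swapping the per-thread heap, then restoring it ("example heap" is just
 * a sample name; 1 = is_locked):
 *
 *   clib_mem_heap_t *h = clib_mem_create_heap (0, 32 << 20, 1,
 *                                              "example heap");
 *   clib_mem_heap_t *old = clib_mem_set_heap (h);
 *   void *p = clib_mem_alloc (128);
 *   ...
 *   clib_mem_set_heap (old);
 *   clib_mem_destroy_heap (h);
 */
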
void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
			      clib_mem_usage_t * usage);

void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);

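/*
 * Illustrative sketch: inspecting the occupancy of the current heap using
 * only the fields declared in clib_mem_usage_t above:
 *
 *   clib_mem_usage_t u;
 *   clib_mem_get_heap_usage (clib_mem_get_heap (), &u);
 *   if (u.bytes_free < (1 << 20))
 *     ...				// heap is nearly full
 */
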
u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);
u8 *format_clib_mem_page_stats (u8 * s, va_list * va);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);

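/*
 * Illustrative sketch (assuming CLIB_MEM_VM_MAP_FAILED, defined above, is
 * the failure sentinel): mapping 16 hugepages of 2M at a kernel-chosen
 * address (start = 0), then unmapping them:
 *
 *   void *p = clib_mem_vm_map (0, 16 << 21, CLIB_MEM_PAGE_SZ_2M, "%s",
 *                              "example map");
 *   if (p == CLIB_MEM_VM_MAP_FAILED)
 *     ...				// handle failure
 *   else
 *     clib_mem_vm_unmap (p);
 */
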
static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline void
clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

static_always_inline uword
clib_mem_get_default_hugepage_size (void)
{
  return 1ULL << clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (clib_mem_heap_t * v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}

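/*
 * Example: with 4K pages, a 5000-byte request rounds up to the next page
 * multiple:
 *
 *   clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K);	// 8192
 */
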
typedef struct
{
  clib_mem_page_sz_t log2_page_sz;
  uword total;
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}

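/*
 * Illustrative sketch: walking every node set in numa_node_bitmap.
 * Passing -1 starts at the lowest-numbered node; -1 is returned once the
 * bitmap is exhausted:
 *
 *   int numa = -1;
 *   while ((numa = vlib_mem_get_next_numa_node (numa)) >= 0)
 *     ...				// visit node 'numa'
 */
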
static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

/* bulk allocator */

typedef void *clib_mem_bulk_handle_t;
clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
					   u32 min_elts_per_chunk);
void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
u8 *format_clib_mem_bulk (u8 *s, va_list *args);

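/*
 * Illustrative sketch: a bulk pool of fixed-size elements, useful when
 * many equally-sized objects are allocated and freed on a hot path.
 * my_elt_t is a hypothetical element type, and passing 0 for
 * min_elts_per_chunk is assumed to let the allocator pick a default:
 *
 *   clib_mem_bulk_handle_t h = clib_mem_bulk_init (sizeof (my_elt_t), 64, 0);
 *   my_elt_t *e = clib_mem_bulk_alloc (h);
 *   ...
 *   clib_mem_bulk_free (h, e);
 *   clib_mem_bulk_destroy (h);
 */
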
#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */