/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)
#define CLIB_MEM_LOG2_MIN_ALIGN (3)
#define CLIB_MEM_MIN_ALIGN (1 << CLIB_MEM_LOG2_MIN_ALIGN)

typedef enum
{
  CLIB_MEM_PAGE_SZ_UNKNOWN = 0,
  CLIB_MEM_PAGE_SZ_DEFAULT = 1,
  CLIB_MEM_PAGE_SZ_DEFAULT_HUGE = 2,
  CLIB_MEM_PAGE_SZ_4K = 12,
  CLIB_MEM_PAGE_SZ_16K = 14,
  CLIB_MEM_PAGE_SZ_64K = 16,
  CLIB_MEM_PAGE_SZ_1M = 20,
  CLIB_MEM_PAGE_SZ_2M = 21,
  CLIB_MEM_PAGE_SZ_16M = 24,
  CLIB_MEM_PAGE_SZ_32M = 25,
  CLIB_MEM_PAGE_SZ_512M = 29,
  CLIB_MEM_PAGE_SZ_1G = 30,
  CLIB_MEM_PAGE_SZ_16G = 34,
} clib_mem_page_sz_t;
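
/* Usage sketch (illustrative, not part of the original header): the
   enumerators are log2 page sizes, so a shift converts them to bytes;
   clib_mem_page_bytes () below also resolves DEFAULT / DEFAULT_HUGE:

     uword two_mb = 1ULL << CLIB_MEM_PAGE_SZ_2M;
     uword sys_pg = clib_mem_page_bytes (CLIB_MEM_PAGE_SZ_DEFAULT);
*/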

typedef struct _clib_mem_vm_map_hdr
{
  /* base address */
  uword base_addr;

  /* number of pages */
  uword num_pages;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz;

  /* file descriptor, -1 if memory is not shared */
  int fd;

  /* allocation name */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  char name[CLIB_VM_MAP_HDR_NAME_MAX_LEN];

  /* linked list */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;

#define foreach_clib_mem_heap_flag                                            \
  _ (0, LOCKED, "locked")                                                     \
  _ (1, UNMAP_ON_DESTROY, "unmap-on-destroy")                                 \
  _ (2, TRACED, "traced")

typedef enum
{
#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),
  foreach_clib_mem_heap_flag
#undef _
} clib_mem_heap_flag_t;
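
/* For reference, the X-macro above expands to:
     CLIB_MEM_HEAP_F_LOCKED = (1 << 0),
     CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY = (1 << 1),
     CLIB_MEM_HEAP_F_TRACED = (1 << 2),
*/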

typedef struct
{
  /* base address */
  void *base;

  /* dlmalloc mspace */
  void *mspace;

  /* heap size */
  uword size;

  /* page size (log2) */
  clib_mem_page_sz_t log2_page_sz:8;

  /* flags */
  clib_mem_heap_flag_t flags:8;

  /* name - _MUST_ be last */
  char name[0];
} clib_mem_heap_t;

typedef struct
{
  /* log2 system page size */
  clib_mem_page_sz_t log2_page_sz;

  /* log2 default hugepage size */
  clib_mem_page_sz_t log2_default_hugepage_sz;

  /* log2 system default hugepage size */
  clib_mem_page_sz_t log2_sys_default_hugepage_sz;

  /* bitmap of available numa nodes */
  u32 numa_node_bitmap;

  /* per CPU heaps */
  void *per_cpu_mheaps[CLIB_MAX_MHEAPS];

  /* per NUMA heaps */
  void *per_numa_mheaps[CLIB_MAX_NUMAS];

  /* memory maps */
  clib_mem_vm_map_hdr_t *first_map, *last_map;

  /* map lock */
  u8 map_lock;

  /* last error */
  clib_error_t *error;
} clib_mem_main_t;

extern clib_mem_main_t clib_mem_main;

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

always_inline clib_mem_heap_t *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_mem_main.per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (void *new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_mem_main.per_cpu_mheaps[cpu];
  clib_mem_main.per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_mem_main.per_numa_mheaps));
  return clib_mem_main.per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (void *new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_mem_main.per_numa_mheaps[numa];
  clib_mem_main.per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_mem_main.per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_mem_main.per_cpu_mheaps[i],
				       0, clib_mem_main.per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
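
/* Call-site sketch (illustrative; worker_fn is hypothetical): a newly
   created pthread claims its heap slot before any clib_mem allocation:

     static void *
     worker_fn (void *arg)
     {
       clib_mem_set_thread_index ();
       ...
     }
*/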

/* Memory allocator which calls os_out_of_memory() when it fails */
void *clib_mem_alloc (uword size);
void *clib_mem_alloc_aligned (uword size, uword align);
void *clib_mem_alloc_or_null (uword size);
void *clib_mem_alloc_aligned_or_null (uword size, uword align);
void *clib_mem_realloc (void *p, uword new_size);
void *clib_mem_realloc_aligned (void *p, uword new_size, uword align);

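/* Usage sketch (illustrative): the _or_null variants return 0 on failure
   instead of invoking os_out_of_memory ():

     u8 *buf = clib_mem_alloc_aligned_or_null (1 << 16, 64);
     if (buf == 0)
       return 0;
     ...
     clib_mem_free (buf);
*/
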
/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

uword clib_mem_is_heap_object (void *p);
void clib_mem_free (void *p);
uword clib_mem_size (void *p);
void clib_mem_free_s (void *p);

always_inline clib_mem_heap_t *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline clib_mem_heap_t *
clib_mem_set_heap (clib_mem_heap_t * heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
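
/* Push/pop sketch (illustrative; tmp_heap is a caller-provided heap):
   temporarily switch the calling thread to another heap, then restore
   the previous one:

     clib_mem_heap_t *old = clib_mem_set_heap (tmp_heap);
     void *p = clib_mem_alloc (128);
     clib_mem_set_heap (old);
*/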

void clib_mem_destroy_heap (clib_mem_heap_t * heap);
clib_mem_heap_t *clib_mem_create_heap (void *base, uword size, int is_locked,
				       char *fmt, ...);
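
/* Sketch (illustrative): create a private, locked 32 MB heap and destroy
   it when done; base = 0 asks the library to map the memory itself:

     clib_mem_heap_t *h = clib_mem_create_heap (0, 32 << 20, 1, "my-heap");
     ...
     clib_mem_destroy_heap (h);
*/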

void clib_mem_main_init ();
void *clib_mem_init (void *base, uword size);
void *clib_mem_init_with_page_size (uword memory_size,
				    clib_mem_page_sz_t log2_page_sz);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_get_heap_usage (clib_mem_heap_t * heap,
			      clib_mem_usage_t * usage);

void *clib_mem_get_heap_base (clib_mem_heap_t * heap);
uword clib_mem_get_heap_size (clib_mem_heap_t * heap);
uword clib_mem_get_heap_free_space (clib_mem_heap_t * heap);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
u8 *format_clib_mem_heap (u8 * s, va_list * va);
u8 *format_clib_mem_page_stats (u8 * s, va_list * va);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}
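
/* Pairing sketch (illustrative): note that clib_mem_vm_alloc returns 0
   on failure, not MAP_FAILED:

     void *va = clib_mem_vm_alloc (1 << 21);
     if (va)
       clib_mem_vm_free (va, 1 << 21);
*/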

void *clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
				uword size, int fd, uword offset, char *name);

void *clib_mem_vm_map (void *start, uword size,
		       clib_mem_page_sz_t log2_page_size, char *fmt, ...);
void *clib_mem_vm_map_stack (uword size, clib_mem_page_sz_t log2_page_size,
			     char *fmt, ...);
void *clib_mem_vm_map_shared (void *start, uword size, int fd, uword offset,
			      char *fmt, ...);
int clib_mem_vm_unmap (void *base);
clib_mem_vm_map_hdr_t *clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *
						     hdr);
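
/* Iteration sketch (illustrative): passing 0 yields the first map header,
   and 0 comes back after the last one:

     clib_mem_vm_map_hdr_t *hdr = 0;
     while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
       fformat (stdout, "map: %s\n", hdr->name);
*/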

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_page_size (void)
{
  return clib_mem_main.log2_page_sz;
}

static_always_inline uword
clib_mem_get_page_size (void)
{
  return 1ULL << clib_mem_main.log2_page_sz;
}

static_always_inline void
clib_mem_set_log2_default_hugepage_size (clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main.log2_default_hugepage_sz = log2_page_sz;
}

static_always_inline clib_mem_page_sz_t
clib_mem_get_log2_default_hugepage_size ()
{
  return clib_mem_main.log2_default_hugepage_sz;
}

static_always_inline uword
clib_mem_get_default_hugepage_size (void)
{
  return 1ULL << clib_mem_main.log2_default_hugepage_sz;
}

int clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...);
uword clib_mem_get_fd_page_size (int fd);
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size,
			   clib_mem_page_sz_t log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
			    int n_pages);
void clib_mem_destroy (void);
int clib_mem_set_numa_affinity (u8 numa_node, int force);
int clib_mem_set_default_numa_affinity ();
void clib_mem_vm_randomize_va (uword * requested_va,
			       clib_mem_page_sz_t log2_page_size);
void mheap_trace (clib_mem_heap_t * v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

always_inline uword
clib_mem_round_to_page_size (uword size, clib_mem_page_sz_t log2_page_size)
{
  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    log2_page_size = clib_mem_get_log2_page_size ();
  else if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    log2_page_size = clib_mem_get_log2_default_hugepage_size ();

  return round_pow2 (size, 1ULL << log2_page_size);
}
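
/* Worked example (illustrative):
   clib_mem_round_to_page_size (5000, CLIB_MEM_PAGE_SZ_4K) returns 8192,
   i.e. round_pow2 (5000, 4096), the next 4K-page multiple. */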

typedef struct
{
  clib_mem_page_sz_t log2_page_sz;
  uword total;
  uword mapped;
  uword not_mapped;
  uword per_numa[CLIB_MAX_NUMAS];
  uword unknown;
} clib_mem_page_stats_t;

void clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			      uword n_pages, clib_mem_page_stats_t * stats);

static_always_inline int
vlib_mem_get_next_numa_node (int numa)
{
  clib_mem_main_t *mm = &clib_mem_main;
  u32 bitmap = mm->numa_node_bitmap;

  if (numa >= 0)
    bitmap &= ~pow2_mask (numa + 1);
  if (bitmap == 0)
    return -1;

  return count_trailing_zeros (bitmap);
}
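
/* Iteration sketch (illustrative; handle_numa_node is hypothetical):
   start at -1 to visit every numa node present in the bitmap:

     int numa = -1;
     while ((numa = vlib_mem_get_next_numa_node (numa)) >= 0)
       handle_numa_node (numa);
*/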

static_always_inline clib_mem_page_sz_t
clib_mem_log2_page_size_validate (clib_mem_page_sz_t log2_page_size)
{
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT)
    return clib_mem_get_log2_page_size ();
  if (log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    return clib_mem_get_log2_default_hugepage_size ();
  return log2_page_size;
}

static_always_inline uword
clib_mem_page_bytes (clib_mem_page_sz_t log2_page_size)
{
  return 1ULL << clib_mem_log2_page_size_validate (log2_page_size);
}

static_always_inline clib_error_t *
clib_mem_get_last_error (void)
{
  return clib_mem_main.error;
}

/* bulk allocator */

typedef void *clib_mem_bulk_handle_t;
clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
					   u32 min_elts_per_chunk);
void clib_mem_bulk_destroy (clib_mem_bulk_handle_t h);
void *clib_mem_bulk_alloc (clib_mem_bulk_handle_t h);
void clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p);
u8 *format_clib_mem_bulk (u8 *s, va_list *args);
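
/* Usage sketch (illustrative; my_elt_t is hypothetical): a pool of
   fixed-size, 64-byte-aligned elements allocated in chunks of at
   least 32:

     clib_mem_bulk_handle_t h = clib_mem_bulk_init (sizeof (my_elt_t), 64, 32);
     my_elt_t *e = clib_mem_bulk_alloc (h);
     clib_mem_bulk_free (h, e);
     clib_mem_bulk_destroy (h);
*/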

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */