/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#ifdef __FreeBSD__
#include <sys/memrange.h>
#endif /* __FreeBSD__ */
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>

#include <vppinfra/format.h>
#ifdef __linux__
#include <vppinfra/linux/sysfs.h>
#endif
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#include <vppinfra/cpu.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

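/* Initialize the pmalloc main structure: pick the default (huge) page size,
   check whether physical-address lookup via the pagemap is available, and
   reserve the contiguous virtual address range that all arenas are carved
   from. */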
__clib_export int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, CLIB_MEM_PAGE_SZ_DEFAULT, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);

  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
				     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}

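/* Carve a chunk of n_blocks out of a single page using a first-fit walk of
   the page's chunk list; a leading chunk is split off when alignment padding
   is needed and a tail chunk when the free chunk is larger than the request.
   Returns the chunk's virtual address, or 0 if the page cannot satisfy it. */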
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

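/* Refresh the virtual-to-physical lookup table. Each entry stores the
   va - pa offset of one lookup page; translations come from
   /proc/self/pagemap on Linux and from the /dev/mem MEM_EXTRACT_PADDR ioctl
   on FreeBSD. When the pagemap is not accessible, entries are filled with
   the virtual address itself (no translation). */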
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t *pm, u32 first, u32 count)
{
#ifdef __linux__
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * count)
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * count)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> clib_mem_get_log2_page_size ()) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << clib_mem_get_log2_page_size ();
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
#elif defined(__FreeBSD__)
  struct mem_extract meme;
  uword p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table,
			vec_len (pm->pages) * elts_per_page - 1,
			CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * count)
	{
	  pm->lookup_table[p] =
	    pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/dev/mem", O_RDONLY);
  if (fd == -1)
    return;

  while (p < (uword) elts_per_page * count)
    {
      meme.me_vaddr =
	pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      /* if translation fails, leave the entry untranslated (as the linux
	 pagemap path does) instead of retrying the same page forever */
      if (ioctl (fd, MEM_EXTRACT_PADDR, &meme) == -1)
	meme.me_paddr = 0;
      pm->lookup_table[p] = meme.me_vaddr - meme.me_paddr;
      p++;
    }

  close (fd);
  return;
#else
#error "Unsupported OS"
#endif
}

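/* Map n_pages of backing memory for an arena at the next free slot in the
   reserved region: preallocate hugepages (Linux), bind the allocation to the
   requested numa node, mmap (shared fd-backed or private anonymous), verify
   that the pages actually landed on that node, and finally extend the page
   vector and the lookup table. Returns a pointer to the first new page. */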
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_mem_page_stats_t stats = {};
  clib_pmalloc_page_t *pp = 0;
  int rv, i, mmap_flags;
  void *va = MAP_FAILED;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

#ifdef __linux__
  if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }
#endif /* __linux__ */

  rv = clib_mem_set_numa_affinity (numa_node, /* force */ 1);
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      a->fd = clib_mem_vm_create_fd (a->log2_subpage_sz, "%s", a->name);
      if (a->fd == -1)
	goto error;
      if ((ftruncate (a->fd, size)) == -1)
	goto error;
    }
  else
    {
#ifdef __linux__
      if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
	mmap_flags |= MAP_HUGETLB;
#endif /* __linux__ */

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      va = MAP_FAILED;
      goto error;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size () &&
      mlock (va, size) != 0)
    {
      pm->error = clib_error_return_unix (0, "Unable to lock pages");
      goto error;
    }

  clib_memset (va, 0, size);

  rv = clib_mem_set_default_numa_affinity ();
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if the request is for numa node 0,
     to support non-numa kernels */
  clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, 1, &stats);

  if (stats.per_numa[numa_node] != 1 &&
      !(numa_node == 0 && stats.unknown == 1))
    {
      u16 allocated_at = ~0;
      if (stats.unknown)
	pm->error =
	  clib_error_return (0,
			     "unable to get information about numa allocation");
      else
	{
	  for (u16 i = 0; i < CLIB_MAX_NUMAS; i++)
	    if (stats.per_numa[i] == 1)
	      allocated_at = i;

	  pm->error =
	    clib_error_return (0,
			       "page allocated on the wrong numa node (%u), "
			       "expected %u",
			       allocated_at, numa_node);
	}

      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if the new arena is using a smaller page size, we need to rebuild the
     whole lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
				   n_pages);
    }
  else
    pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap & reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
    }
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

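/* Create a shared-memory arena backed by a file descriptor from
   clib_mem_vm_create_fd, sized in default pages, and return the virtual
   address of its first page (or 0 on failure). */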
__clib_export void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

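/* Common allocation path: resolve the arena (creating a per-numa default
   arena on first use), convert size and alignment to blocks, try every
   existing page of the arena and, for non-shared arenas only, map one more
   page when all of them are full. */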
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
					numa_node);

      if (rv)
	return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

__clib_export void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

__clib_export void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t *pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

__clib_export void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t *pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

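/* Two chunks can be merged only when both exist, both are free and both lie
   within the same subpage, so a merged chunk never spans a subpage
   boundary. */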
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

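/* Return a chunk to its page: look the chunk up by virtual address, mark it
   free and coalesce it with free neighbours on either side. */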
__clib_export void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

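/* Format helper for a single page: prints free space and chunk counts and,
   at verbosity >= 2, a table of all chunks on the page. */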
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

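/* Top-level format function: prints global page and size information, the
   last recorded error and, per arena, its pages (with per-page detail at
   higher verbosity). */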
__clib_export u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  pool_foreach (a, pm->arenas)
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    }

  return s;
}

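/* Dump the virtual-to-physical lookup table, one line per lookup page. */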
__clib_export u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
    {
      uword *lookup_val, pa, va;
      lookup_val = vec_elt_at_index (pm->lookup_table, index);
      va =
	pointer_to_uword (pm->base) +
	((uword) index << pm->lookup_log2_page_sz);
      pa = va - *lookup_val;
      s =
	format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
		uword_to_pointer (pa, u64), format_log2_page_size,
		pm->lookup_log2_page_sz);
    }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */