/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>

#include <vppinfra/format.h>
#ifdef __linux__
#include <vppinfra/linux/sysfs.h>
#endif
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#include <vppinfra/cpu.h>

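/* Amount of VA space reserved up front when the caller does not request a
   specific size: 16 GB on 64-bit targets, 256 MB where pointers are 32 bits
   and VA space is scarce. */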
#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

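/* Number of whole pages needed to hold 'size' bytes, e.g. with the typical
   2 MB default hugepage (log2_page_sz = 21) a 5 MB request rounds up to
   6 MB and yields 3 pages. */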
static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

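/* Reserve (but do not yet populate) the VA region all arenas are carved
   from.  A minimal usage sketch, letting the allocator pick the base VA and
   the DEFAULT_RESERVED_MB size:

     clib_pmalloc_main_t pm = { 0 };
     if (clib_pmalloc_init (&pm, 0, 0) != 0)
       clib_error_report (pm.error);
 */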
__clib_export int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, CLIB_MEM_PAGE_SZ_DEFAULT, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);

  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
				     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}

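/* First-fit allocation of an n_blocks chunk within a single page: walk the
   page's chunk list, splitting off an alignment head chunk and/or a free
   tail chunk as needed.  Returns the chunk VA, or 0 if this page cannot
   satisfy the request. */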
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

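/* Refresh the VA->PA lookup table for the page range [first, first + count).
   Each entry stores (va - pa) for one lookup page.  Physical addresses come
   from /proc/self/pagemap, whose 64-bit entries carry the page-present flag
   in bit 63 and the physical frame number in bits 0-54, hence the
   pow2_mask (55) below.  When the pagemap is unreadable, pa stays 0 and the
   entry degenerates to the VA itself. */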
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * (first + count))
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * (first + count))
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> clib_mem_get_log2_page_size ()) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << clib_mem_get_log2_page_size ();
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}

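/* Grow an arena by n_pages default-size pages on the requested numa node:
   preallocate hugepages when needed (Linux), switch this thread's memory
   allocation policy to the target node for the duration of the mapping,
   back the region with either a shared fd or anonymous memory, verify the
   pages actually landed on the requested node, then register the new pages
   and refresh the lookup table. */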
Damjan Marion68b4da62018-09-30 18:26:20 +0200228static inline clib_pmalloc_page_t *
229pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
230 u32 numa_node, u32 n_pages)
231{
Damjan Marionf8cb7012020-10-09 17:16:55 +0200232 clib_mem_page_stats_t stats = {};
Damjan Marion68b4da62018-09-30 18:26:20 +0200233 clib_pmalloc_page_t *pp = 0;
Damjan Marionf8cb7012020-10-09 17:16:55 +0200234 int rv, i, mmap_flags;
Damjan Marion801c7012019-10-30 18:07:35 +0100235 void *va = MAP_FAILED;
Damjan Marion567e61d2018-10-24 17:08:26 +0200236 uword size = (uword) n_pages << pm->def_log2_page_sz;
Damjan Marion68b4da62018-09-30 18:26:20 +0200237
238 clib_error_free (pm->error);
239
240 if (pm->max_pages <= vec_len (pm->pages))
241 {
242 pm->error = clib_error_return (0, "maximum number of pages reached");
243 return 0;
244 }
245
Tom Jonescb3372d2024-01-26 17:34:51 +0000246#ifdef __linux__
Damjan Marion6bfd0762020-09-11 22:16:53 +0200247 if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
Damjan Marion567e61d2018-10-24 17:08:26 +0200248 {
249 pm->error = clib_sysfs_prealloc_hugepages (numa_node,
250 a->log2_subpage_sz, n_pages);
Damjan Marion68b4da62018-09-30 18:26:20 +0200251
Damjan Marion567e61d2018-10-24 17:08:26 +0200252 if (pm->error)
253 return 0;
254 }
Tom Jonescb3372d2024-01-26 17:34:51 +0000255#endif /* __linux__ */
Damjan Marion68b4da62018-09-30 18:26:20 +0200256
Damjan Marionf8cb7012020-10-09 17:16:55 +0200257 rv = clib_mem_set_numa_affinity (numa_node, /* force */ 1);
258 if (rv == CLIB_MEM_ERROR && numa_node != 0)
Damjan Marion68b4da62018-09-30 18:26:20 +0200259 {
260 pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
261 "numa node %u", numa_node);
262 return 0;
263 }
264
Damjan Marion54e8e392018-11-07 17:55:26 +0100265 mmap_flags = MAP_FIXED;
Damjan Marionc04e2b02018-10-25 15:56:04 +0200266
Damjan Marion68b4da62018-09-30 18:26:20 +0200267 if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
268 {
269 mmap_flags |= MAP_SHARED;
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200270 a->fd = clib_mem_vm_create_fd (a->log2_subpage_sz, "%s", a->name);
Damjan Marion68b4da62018-09-30 18:26:20 +0200271 if (a->fd == -1)
272 goto error;
Damjan Marion54e8e392018-11-07 17:55:26 +0100273 if ((ftruncate (a->fd, size)) == -1)
274 goto error;
Damjan Marion68b4da62018-09-30 18:26:20 +0200275 }
276 else
277 {
Tom Jones77ce67f2024-01-26 14:15:54 +0000278#ifdef __linux__
Damjan Marion6bfd0762020-09-11 22:16:53 +0200279 if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
Damjan Marion8ebd7922018-11-28 10:46:03 +0100280 mmap_flags |= MAP_HUGETLB;
Tom Jones77ce67f2024-01-26 14:15:54 +0000281#endif /* __linux__ */
Damjan Marion8ebd7922018-11-28 10:46:03 +0100282
Damjan Marion54e8e392018-11-07 17:55:26 +0100283 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
Damjan Marion68b4da62018-09-30 18:26:20 +0200284 a->fd = -1;
285 }
286
Damjan Marion567e61d2018-10-24 17:08:26 +0200287 va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
288 if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
289 MAP_FAILED)
Damjan Marion68b4da62018-09-30 18:26:20 +0200290 {
291 pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
292 "fd %d numa %d flags 0x%x", n_pages,
293 va, a->fd, numa_node, mmap_flags);
Andrew Yourtchenko9ce35232019-11-18 10:23:54 +0000294 va = MAP_FAILED;
Damjan Marion68b4da62018-09-30 18:26:20 +0200295 goto error;
296 }
297
Damjan Marion6bfd0762020-09-11 22:16:53 +0200298 if (a->log2_subpage_sz != clib_mem_get_log2_page_size () &&
299 mlock (va, size) != 0)
Artem Belovf6defa12019-02-26 01:47:34 +0000300 {
Damjan Marion801c7012019-10-30 18:07:35 +0100301 pm->error = clib_error_return_unix (0, "Unable to lock pages");
302 goto error;
Artem Belovf6defa12019-02-26 01:47:34 +0000303 }
304
Damjan Marion567e61d2018-10-24 17:08:26 +0200305 clib_memset (va, 0, size);
306
Damjan Marionf8cb7012020-10-09 17:16:55 +0200307 rv = clib_mem_set_default_numa_affinity ();
308 if (rv == CLIB_MEM_ERROR && numa_node != 0)
Damjan Marion68b4da62018-09-30 18:26:20 +0200309 {
310 pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
311 goto error;
312 }
313
314 /* we tolerate move_pages failure only if request os for numa node 0
315 to support non-numa kernels */
Damjan Marionf8cb7012020-10-09 17:16:55 +0200316 clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, 1, &stats);
317
Klement Sekeraec62af52021-04-20 18:08:45 +0200318 if (stats.per_numa[numa_node] != 1 &&
319 !(numa_node == 0 && stats.unknown == 1))
Damjan Marion68b4da62018-09-30 18:26:20 +0200320 {
Damjan Marionf8cb7012020-10-09 17:16:55 +0200321 u16 allocated_at = ~0;
322 if (stats.unknown)
323 clib_error_return (0,
324 "unable to get information about numa allocation");
325
326 for (u16 i = 0; i < CLIB_MAX_NUMAS; i++)
327 if (stats.per_numa[i] == 1)
328 allocated_at = i;
329
330 clib_error_return (0,
331 "page allocated on the wrong numa node (%u), "
332 "expected %u",
333 allocated_at, numa_node);
Damjan Marion68b4da62018-09-30 18:26:20 +0200334
Damjan Marion68b4da62018-09-30 18:26:20 +0200335 goto error;
336 }
337
Damjan Marion68b4da62018-09-30 18:26:20 +0200338 for (i = 0; i < n_pages; i++)
339 {
Damjan Marion68b4da62018-09-30 18:26:20 +0200340 vec_add2 (pm->pages, pp, 1);
Damjan Marion567e61d2018-10-24 17:08:26 +0200341 pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
Damjan Marion68b4da62018-09-30 18:26:20 +0200342 pp->index = pp - pm->pages;
343 pp->arena_index = a->index;
Damjan Marion68b4da62018-09-30 18:26:20 +0200344 vec_add1 (a->page_indices, pp->index);
345 a->n_pages++;
Damjan Marion68b4da62018-09-30 18:26:20 +0200346 }
347
Damjan Marion567e61d2018-10-24 17:08:26 +0200348
349 /* if new arena is using smaller page size, we need to rebuild whole
350 lookup table */
351 if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
352 {
353 pm->lookup_log2_page_sz = a->log2_subpage_sz;
354 pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
355 n_pages);
356 }
357 else
358 pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));
Damjan Marion68b4da62018-09-30 18:26:20 +0200359
360 /* return pointer to 1st page */
361 return pp - (n_pages - 1);
362
363error:
Damjan Marion801c7012019-10-30 18:07:35 +0100364 if (va != MAP_FAILED)
365 {
366 /* unmap & reserve */
367 munmap (va, size);
368 mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
369 -1, 0);
370 }
Damjan Marion68b4da62018-09-30 18:26:20 +0200371 if (a->fd != -1)
372 close (a->fd);
373 return 0;
374}
375
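/* Create a named arena backed by a file descriptor so it can be shared with
   other processes.  A usage sketch (the name and size are illustrative):
   carve a 32 MB shared arena with the default page size on the local node:

     void *base = clib_pmalloc_create_shared_arena (pm, "my-arena",
						    32 << 20, 0,
						    CLIB_PMALLOC_NUMA_LOCAL);
 */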
__clib_export void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

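/* Common allocation path.  With a == 0 the request goes to the per-numa
   default arena (created on first use); otherwise it must fit within a
   single subpage of the given arena.  Sizes are rounded up to
   PMALLOC_BLOCK_SZ blocks, every existing page is tried first, and only
   non-shared arenas are grown by one page on demand. */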
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
					numa_node);

      if (rv)
	return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

__clib_export void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

__clib_export void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t *pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

__clib_export void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t *pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

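/* Two neighboring chunks may merge only if both are free and live in the
   same subpage; merging across a subpage boundary could create a chunk
   whose backing memory is not physically contiguous. */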
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

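/* Return a chunk to its page: look it up by VA (panics on an unknown
   pointer), mark it free, and coalesce with free neighbors on both sides. */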
__clib_export void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

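/* Round-trip sketch (values are illustrative): a 1 KB, 64-byte aligned
   allocation from the local node's default arena, later returned:

     void *p = clib_pmalloc_alloc_aligned_on_numa (pm, 1024, 64,
						   CLIB_PMALLOC_NUMA_LOCAL);
     if (p)
       clib_pmalloc_free (pm, p);
 */
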
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

__clib_export u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  pool_foreach (a, pm->arenas)
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    }

  return s;
}

__clib_export u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
    {
      uword *lookup_val, pa, va;
      lookup_val = vec_elt_at_index (pm->lookup_table, index);
      va = pointer_to_uword (pm->base) +
	((uword) index << pm->lookup_log2_page_sz);
      pa = va - *lookup_val;
      s = format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
		  uword_to_pointer (pa, u64), format_log2_page_size,
		  pm->lookup_log2_page_sz);
    }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */