/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#include <vppinfra/cpu.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}
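
/* Worked example (illustrative, not part of the original source): with
   2 MB default hugepages (log2_page_sz = 21), a 5 MB request is rounded
   up to the next page multiple, so pmalloc_size2pages (5 << 20, 21)
   returns 6 MB >> 21 = 3 pages. */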

__clib_export int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, CLIB_MEM_PAGE_SZ_DEFAULT, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);

  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
				     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}
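
/* Usage sketch (illustrative only, not from the original source):
   initialize a zeroed clib_pmalloc_main_t, letting the allocator pick the
   base address and reserve the default amount of VA space (16 GB on
   64-bit):

     clib_pmalloc_main_t pm = { 0 };
     if (clib_pmalloc_init (&pm, 0, 0) != 0)
       clib_error_report (pm.error);
*/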

static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed, create a new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}
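
/* Alignment example (illustrative): with block_align = 4 and a candidate
   chunk starting at block 6, off = (4 - (6 & 3)) & 3 = 2, so a 2-block
   padding chunk is split off and the allocation starts at block 8. */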

static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  /* update lookup entries for pages [first, first + count) */
  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) (first + count) * elts_per_page)
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) (first + count) * elts_per_page)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> clib_mem_get_log2_page_size ()) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << clib_mem_get_log2_page_size ();
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}
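
/* Each /proc/self/pagemap entry is 64 bits per system page: bit 63 is the
   "page present" flag and bits 0-54 hold the physical frame number, which
   is why the code above masks with pow2_mask (55) and shifts by the system
   page size. The lookup table stores the va-to-pa offset (va - pa) per
   lookup page, so a physical address is later recovered with a single
   subtraction (this is what clib_pmalloc_get_pa () in pmalloc.h relies
   on). */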

static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_mem_page_stats_t stats = {};
  clib_pmalloc_page_t *pp = 0;
  int rv, i, mmap_flags;
  void *va = MAP_FAILED;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }

  rv = clib_mem_set_numa_affinity (numa_node, /* force */ 1);
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      a->fd = clib_mem_vm_create_fd (a->log2_subpage_sz, "%s", a->name);
      if (a->fd == -1)
	goto error;
      if ((ftruncate (a->fd, size)) == -1)
	goto error;
    }
  else
    {
      if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
	mmap_flags |= MAP_HUGETLB;

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      va = MAP_FAILED;
      goto error;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size () &&
      mlock (va, size) != 0)
    {
      pm->error = clib_error_return_unix (0, "unable to lock pages");
      goto error;
    }

  clib_memset (va, 0, size);

  rv = clib_mem_set_default_numa_affinity ();
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* failure to determine the backing numa node is tolerated only when the
     request is for numa node 0, to support non-numa kernels */
  clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, 1, &stats);

  if (stats.per_numa[numa_node] != 1 &&
      !(numa_node == 0 && stats.unknown == 1))
    {
      u16 allocated_at = ~0;
      if (stats.unknown)
	{
	  pm->error = clib_error_return (
	    0, "unable to get information about numa allocation");
	  goto error;
	}

      for (u16 i = 0; i < CLIB_MAX_NUMAS; i++)
	if (stats.per_numa[i] == 1)
	  allocated_at = i;

      pm->error =
	clib_error_return (0, "page allocated on the wrong numa node (%u), "
			   "expected %u", allocated_at, numa_node);

      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if the new arena is using a smaller page size, the whole lookup table
     needs to be rebuilt at the finer granularity; otherwise only the
     entries for the newly mapped pages are filled in */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));
    }
  else
    pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
				 n_pages);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap & reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
    }
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

__clib_export void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}
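
/* Usage sketch (illustrative only, assuming an initialized pm): create a
   32 MB shared arena on the local numa node with the default page size,
   then carve a page-aligned buffer out of it:

     void *base = clib_pmalloc_create_shared_arena (pm, "my-arena",
						     32 << 20, 0,
						     CLIB_PMALLOC_NUMA_LOCAL);
     void *buf = base ? clib_pmalloc_alloc_from_arena (pm, base, 4096,
						       4096) : 0;
*/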

static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
					numa_node);

      if (rv)
	return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}
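
/* Block math example (illustrative, assuming the usual 64-byte block,
   i.e. PMALLOC_LOG2_BLOCK_SZ = 6): a request of size 200 with align 128
   yields n_blocks = round_pow2 (200, 64) / 64 = 4 and block_align =
   128 >> 6 = 2, so the chunk walker above looks for 4 free blocks
   starting on an even block index. */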

__clib_export void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

__clib_export void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t *pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}
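
/* Two chunks may be merged only if both are free and both live in the
   same subpage: physical contiguity is only guaranteed within a single
   (huge)page of the arena, so a chunk must never span a subpage
   boundary. */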
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}
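
/* Frees the chunk whose va was returned by one of the allocation calls
   above; va must match exactly, since it is looked up in
   chunk_index_by_va and an unknown address is treated as fatal.
   Adjacent free chunks within the same subpage are coalesced. */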
__clib_export void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

__clib_export u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas)
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    }
  /* *INDENT-ON* */

  return s;
}

__clib_export u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
    {
      uword *lookup_val, pa, va;
      lookup_val = vec_elt_at_index (pm->lookup_table, index);
      va = pointer_to_uword (pm->base) +
	((uword) index << pm->lookup_log2_page_sz);
      pa = va - *lookup_val;
      s = format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
		  uword_to_pointer (pa, u64), format_log2_page_size,
		  pm->lookup_log2_page_sz);
    }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */