/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>
#include <sched.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

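/* pmalloc carves NUMA-aware, physically-backed pages into fixed-size
 * blocks of PMALLOC_BLOCK_SZ bytes. Each page owns a pool of chunks; a
 * chunk describes a run of blocks, carries used/free state and is linked
 * to its neighbours through prev/next pool indices. */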
static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

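/* Resolve CLIB_PMALLOC_NUMA_LOCAL to the node the calling thread is
 * currently running on; returns non-zero if getcpu() fails. */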
static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node) != 0)
	return 1;
    }
  return 0;
}

int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword off, pagesize;
  u64 *pt = 0;
  int mmap_flags;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, pm->sys_log2_page_sz, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  /* reserve VA space for future growth */
  mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;

  if (base_addr)
    mmap_flags |= MAP_FIXED;

  pm->base = mmap (uword_to_pointer (base_addr, void *), size + pagesize,
		   PROT_NONE, mmap_flags, -1, 0);

  if (pm->base == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
					  pm->max_pages);
      return -1;
    }

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
  if (off)
    {
      munmap (pm->base, off);
      pm->base += off;
    }

  munmap (pm->base + ((uword) pm->max_pages * pagesize), pagesize - off);
  return 0;
}
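
/* Typical call flow, as an illustrative sketch only (sizes and alignment
 * are arbitrary; all functions referenced are defined in this file):
 *
 *   clib_pmalloc_main_t pm = { 0 };
 *   if (clib_pmalloc_init (&pm, 0, 0) == 0)
 *     {
 *       u8 *p = clib_pmalloc_alloc_aligned_on_numa (&pm, 1024, 64,
 *                                                   CLIB_PMALLOC_NUMA_LOCAL);
 *       if (p)
 *         clib_pmalloc_free (&pm, p);
 *     }
 */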
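/* Carve a chunk of n_blocks blocks, aligned to block_align blocks, out of
 * page pp. On first use the page is seeded with one free chunk per
 * subpage; the chunk list is then walked first-fit, splitting alignment
 * padding and any tail remainder into new free chunks. Returns the VA of
 * the allocation, or 0 if the page cannot satisfy the request. */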
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

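/* Refresh the VA->PA lookup table for `count' pages starting at page
 * `first'. Each entry stores (va - pa) for one lookup page, so a physical
 * address can later be recovered by subtraction. Physical frame numbers
 * come from /proc/self/pagemap (bit 63 = page present, bits 0-54 = PFN);
 * when the pagemap is unavailable the stored offset equals the VA itself,
 * i.e. the PA reads back as 0. */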
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) (first + count) * elts_per_page)
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) (first + count) * elts_per_page)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}

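/* Map n_pages pages for arena `a' on `numa_node': bind the mempolicy to
 * the node, mmap anonymous or fd-backed (shared) memory at the next free
 * slot in the reserved VA region, verify the backing pages were really
 * allocated and landed on the requested node, then restore the previous
 * mempolicy and append the new pages to pm->pages. Returns a pointer to
 * the first new page, or 0 on error (pm->error is set). */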
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  void *va;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword page_size = 1ULL << a->log2_subpage_sz;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1UL << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if ((pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP) == 0)
    mmap_flags |= MAP_LOCKED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
	pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      else
	pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
	goto error;
      if ((ftruncate (a->fd, size)) == -1)
	goto error;
    }
  else
    {
      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
	mmap_flags |= MAP_HUGETLB;

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      goto error;
    }

  /* Verify that every huge page actually received backing memory;
     touching an unbacked huge page later would raise SIGBUS */
  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      for (i = 0; i < n_pages; i++)
	{
	  unsigned char flag;
	  /* bit 0 of the mincore() vector byte is set if the page is
	     resident in memory */
	  if (mincore (va + i * page_size, 1, &flag) != 0 || !flag)
	    {
	      pm->error =
		clib_error_return_unix (0,
					"Unable to fulfill huge page allocation request");
	      goto error;
	    }
	}
    }

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if the request is for numa node 0
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
	clib_error_return_unix (0, "page allocated on wrong node, numa node "
				"%u status %d", numa_node, status) :
	clib_error_return (0, "page allocated on wrong node, numa node "
			   "%u status %d", numa_node, status);

      /* unmap & re-reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if the new arena is using a smaller page size, the lookup-table
     granularity changes and the whole table needs to be rebuilt */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));
    }
  else
    pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages, n_pages);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

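/* Create a shared (fd-backed) arena and return its base VA, or 0 on
 * failure. A minimal, illustrative sketch of how it combines with
 * clib_pmalloc_alloc_from_arena (arena name and sizes are arbitrary):
 *
 *   void *base = clib_pmalloc_create_shared_arena (pm, "pkt-buffers",
 *                                                  32 << 20, 0, 0);
 *   void *p = base ? clib_pmalloc_alloc_from_arena (pm, base, 2048, 64) : 0;
 */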
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

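/* Allocation core: resolve the arena (creating a per-NUMA-node default
 * arena on first use when a == 0), convert size/align to blocks, try each
 * of the arena's existing pages first-fit, and map one more page for
 * non-shared arenas if all pages are full. Requests larger than one
 * (sub)page are rejected. */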
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
  {
    pp = vec_elt_at_index (pm->pages, *page_index);
    void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
				      numa_node);

    if (rv)
      return rv;
  }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

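/* Two chunks may be merged only if both exist, both are free, and both
 * lie within the same subpage, so a chunk never straddles a subpage
 * boundary. */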
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

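/* Free a chunk by VA: look up its chunk index in the hash, mark the chunk
 * free, and coalesce it with free neighbours on either side. Panics if
 * the VA was not allocated by pmalloc. */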
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

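/* Render a log2 page size in human-readable form, e.g. 12 -> "4KB",
 * 21 -> "2MB", 30 -> "1GB". */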
static u8 *
format_log2_page_size (u8 * s, va_list * va)
{
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);
}

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    });
  /* *INDENT-ON* */

  return s;
}

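/* Dump the VA->PA lookup table, one row per lookup page. */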
u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
  {
    uword *lookup_val, pa, va;
    lookup_val = vec_elt_at_index (pm->lookup_table, index);
    va =
      pointer_to_uword (pm->base) +
      ((uword) index << pm->lookup_log2_page_sz);
    pa = va - *lookup_val;
    s =
      format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
	      uword_to_pointer (pa, u64), format_log2_page_size,
	      pm->lookup_log2_page_sz);
  }
  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */