/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

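/* resolve CLIB_PMALLOC_NUMA_LOCAL to the numa node of the calling cpu,
   returns 1 if the node cannot be determined */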
static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node, 0) != 0)
	return 1;
    }
  return 0;
}

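/* initialize the pmalloc main structure: select the default (huge)page
   size, probe pagemap accessibility and reserve a contiguous, page-aligned
   VA region which all future page mappings are carved from */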
int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
  uword off, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, pm->sys_log2_page_sz, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  /* reserve VA space for future growth */
  pm->base = mmap (0, size + pagesize, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (pm->base == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
					  pm->max_pages);
      return -1;
    }

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
  if (off)
    {
      munmap (pm->base, off);
      pm->base += off;
    }

  munmap (pm->base + ((uword) pm->max_pages * pagesize), pagesize - off);
  return 0;
}

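/* first-fit allocation of 'n_blocks' blocks from a single page: walk the
   page's chunk list, split off an offset chunk when alignment requires it
   and a tail chunk when the fit leaves room, then record the chunk index
   for the resulting VA in the chunk_index_by_va hash */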
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

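/* (re)build VA-to-PA lookup table entries for pages [first, first + count);
   each entry holds va - pa for one lookup-page-sized region, read from
   /proc/self/pagemap; without pagemap access entries degenerate to va,
   i.e. physical addresses are reported as 0 */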
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * (first + count))
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * (first + count))
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}

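/* map n_pages contiguous (huge)pages for an arena: preallocate hugepages
   via sysfs if needed, temporarily bind the mempolicy to the requested
   numa node, mmap at the next free slot of the reserved VA region and
   verify placement with move_pages(); on failure the range is returned
   to the PROT_NONE reservation */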
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  void *va;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1 << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED | MAP_ANONYMOUS;

  if ((pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP) == 0)
    mmap_flags |= MAP_LOCKED;

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    mmap_flags |= MAP_HUGETLB | MAP_LOCKED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      if (mmap_flags & MAP_HUGETLB)
	pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      else
	pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
	goto error;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      goto error;
    }

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if request is for numa node 0
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
	clib_error_return_unix (0, "page allocated on wrong node, numa node "
				"%u status %d", numa_node, status) :
	clib_error_return (0, "page allocated on wrong node, numa node "
			   "%u status %d", numa_node, status);

      /* unmap & re-reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if new arena is using smaller page size, the lookup table granularity
     changes, so the whole lookup table needs to be rebuilt; otherwise only
     the entries for the new pages are filled in */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));
    }
  else
    pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages, n_pages);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

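/* create a named, fd-backed arena whose pages can be shared with other
   processes; returns the VA of the first page, or 0 on failure; passing
   log2_page_sz == 0 selects the default page size, e.g. (illustrative
   values only):
     va = clib_pmalloc_create_shared_arena (pm, "buffers", 2 << 20, 0, 0);
 */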
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

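/* common allocation path: resolve the numa node, fall back to (creating)
   the per-numa default arena when none is given, then try to carve the
   requested blocks out of each of the arena's pages, mapping one more
   page on demand for non-shared arenas */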
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  a->index = a - pm->arenas;
	  pm->default_arena_for_numa_node[numa_node] = a->index;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
  {
    pp = vec_elt_at_index (pm->pages, *page_index);
    void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
				      numa_node);

    if (rv)
      return rv;
  }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

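/* two chunks may be merged only if both are free and lie within the same
   subpage, so a chunk never grows across a subpage boundary */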
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

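/* return a chunk to its page and coalesce it with free neighbours;
   panics if va was not handed out by pmalloc */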
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

static u8 *
format_log2_page_size (u8 * s, va_list * va)
{
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);
}

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  s = format (s, "page %u: phys-addr %p ", pp->index, pp->pa);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    });
  /* *INDENT-ON* */

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */