/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

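/* Number of (1 << log2_page_sz)-byte pages needed to hold 'size' bytes,
   rounded up: e.g. 5 MB with 2 MB pages -> 3 pages. */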
static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}

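/* Resolve CLIB_PMALLOC_NUMA_LOCAL to the numa node of the calling CPU via
   the getcpu() wrapper; returns 1 if the node cannot be determined,
   0 otherwise. */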
static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node, 0) != 0)
	return 1;
    }
  return 0;
}

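/* Reserve a contiguous PROT_NONE virtual address range sized for
   'max_pages' default pages plus one extra page, then trim the unaligned
   head and leftover tail so pm->base is page aligned.  Physical pages are
   mapped into this window later, on demand. */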
int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
  uword off, pagesize;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  /* reserve VA space for future growth */
  pm->base = mmap (0, size + pagesize, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (pm->base == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
					  pm->max_pages);
      return -1;
    }

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
  if (off)
    {
      munmap (pm->base, off);
      pm->base += off;
    }

  munmap (pm->base + (pm->max_pages * pagesize), pagesize - off);
  return 0;
}

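/* First-fit allocator within a single page.  On first use the page is
   carved into one free chunk per subpage; chunks form a doubly linked
   list indexed into the 'chunks' pool.  If the fit needs alignment
   padding, a small free chunk is split off in front; if the winning
   chunk is larger than requested, the remainder is split off as a free
   tail chunk.  Returns the chunk's VA, or 0 if nothing fits. */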
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

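/* Rebuild the va->pa translation table for lookup pages [first, first +
   count).  Each /proc/self/pagemap entry is 64 bits: bit 63 is the "page
   present" flag and bits 0-54 hold the physical frame number (see Linux
   Documentation/admin-guide/mm/pagemap.rst).  The table stores the
   (va - pa) offset per lookup page, so a physical address is recovered
   as pa = va - lookup_table[index]; unresolved entries fall back to
   pa = 0. */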
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);

  p = first * elts_per_page;
  while (p < (uword) (first + count) * elts_per_page)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == sizeof (pa) &&
	  pa & (1ULL << 63) /* page present bit */ )
	pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;
      else
	pa = 0;			/* unresolved: store plain va */
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}

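/* Map n_pages default-sized pages for arena 'a' on 'numa_node': switch
   the thread's mempolicy to MPOL_BIND for that node, mmap and zero the
   pages (MAP_LOCKED faults them in under the bound policy), restore the
   previous policy, then verify placement with move_pages().  Hugepage
   arenas get MAP_HUGETLB plus sysfs preallocation; shared arenas are
   backed by a memfd or hugetlb fd. */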
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  void *va;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1UL << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED | MAP_ANONYMOUS | MAP_LOCKED;

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    mmap_flags |= MAP_HUGETLB;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      if (mmap_flags & MAP_HUGETLB)
	pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      else
	pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
	goto error;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      goto error;
    }

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if request is for numa node 0
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
	clib_error_return_unix (0, "page allocated on wrong node, numa node "
				"%u status %d", numa_node, status) :
	clib_error_return (0, "page allocated on wrong node, numa node "
			   "%u status %d", numa_node, status);

      /* unmap & re-reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if new arena is using smaller page size, we need to rebuild whole
     lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));
    }
  else
    pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages, n_pages);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

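/* Minimal usage sketch (illustrative names and sizes, not part of this
   file): create a 32 MB shared arena on numa node 0 with the default
   page size, then carve a 64-byte-aligned allocation out of it:

     void *base = clib_pmalloc_create_shared_arena (pm, "my-arena",
						    32 << 20, 0, 0);
     if (base)
       obj = clib_pmalloc_alloc_from_arena (pm, base, 1024, 64);
*/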
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

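/* Common allocation path.  With a == 0 the allocation goes to the
   per-numa-node default arena (created on first use); otherwise it must
   fit into a single subpage of the given arena.  Sizes are rounded up to
   PMALLOC_BLOCK_SZ blocks and every existing page is tried before a new
   page is mapped (private arenas only; shared arenas never grow here). */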
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  a->index = a - pm->arenas;
	  pm->default_arena_for_numa_node[numa_node] = a->index;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
  {
    pp = vec_elt_at_index (pm->pages, *page_index);
    void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
				      numa_node);

    if (rv)
      return rv;
  }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

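/* Two neighboring chunks may merge only if both are free and both lie
   within the same subpage; chunks never straddle a subpage boundary,
   which keeps every chunk physically contiguous. */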
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}

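/* Return a chunk to its page: look the chunk up by VA (panics on an
   unknown pointer), mark it free, and coalesce with the next and
   previous chunks when possible. */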
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

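/* Pretty-print a log2 page size, e.g. 12 -> "4KB", 21 -> "2MB",
   30 -> "1GB". */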
static u8 *
format_log2_page_size (u8 * s, va_list * va)
{
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);
}

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  s = format (s, "page %u: phys-addr %p ", pp->index, pp->pa);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz);

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    });
  /* *INDENT-ON* */

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */