/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node, 0) != 0)
	return 1;
    }
  return 0;
}

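/* One-time initialization: detect the default hugepage size, then reserve a
 * contiguous PROT_NONE virtual address range large enough for every page this
 * allocator may ever map.  Pages are later mapped with MAP_FIXED inside this
 * reservation, so all allocations stay within one predictable VA window. */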
int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword size)
{
  struct stat st;
  uword off, pagesize;
  int fd;

  ASSERT (pm->error == 0);

  pm->log2_page_sz = 21;
  pm->error = clib_mem_create_hugetlb_fd ("detect_hugepage_size", &fd);

  if (pm->error)
    return -1;

  if (fd != -1)
    {
      /* use the hugepage size reported by hugetlbfs when fstat succeeds,
         otherwise keep the 2MB default set above */
      if (fstat (fd, &st) == 0)
	pm->log2_page_sz = min_log2 (st.st_blksize);
      close (fd);
    }

  pagesize = 1ULL << pm->log2_page_sz;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->log2_page_sz;

  /* reserve VA space for future growth */
  pm->base = mmap (0, size + pagesize, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (pm->base == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to reserve %u pages",
					  pm->max_pages);
      return -1;
    }

  off = round_pow2 (pointer_to_uword (pm->base), pagesize) -
    pointer_to_uword (pm->base);

  /* trim start and end of reservation to be page aligned */
  if (off)
    {
      munmap (pm->base, off);
      pm->base += off;
    }

  munmap (pm->base + (pm->max_pages * pagesize), pagesize - off);
  return 0;
}

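/* First-fit allocator within a single page.  Chunks form a doubly-linked
 * list of indices into the pp->chunks pool; a chunk is split when alignment
 * padding is needed or when it is larger than the request, and the resulting
 * VA -> chunk-index mapping is recorded in a hash so it can be freed later. */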
static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  if (pp->chunks == 0)
    {
      pool_get (pp->chunks, c);
      pp->n_free_chunks = 1;
      pp->first_chunk_index = c - pp->chunks;
      c->prev = c->next = ~0;
      c->size = pp->n_free_blocks;
    }

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

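  /* claim the chunk; if it is larger than requested, split the remainder
     into a new free tail chunk so it stays available for later requests */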
  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

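/* Map n_pages new hugepages for an arena: pre-allocate them via sysfs on the
 * requested numa node, temporarily bind the thread's mempolicy to that node,
 * mmap the pages with MAP_FIXED into the reserved VA range, verify placement
 * with move_pages(), and record each page's physical address from
 * /proc/self/pagemap. */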
static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  u64 seek, pa, sys_page_size;
  int pagemap_fd, status, rv, i, mmap_flags;
  void *va;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  pm->error = clib_sysfs_prealloc_hugepages (numa_node, pm->log2_page_sz,
					     n_pages);

  if (pm->error)
    return 0;

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1 << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED | MAP_HUGETLB | MAP_LOCKED | MAP_ANONYMOUS;
  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
	goto error;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE;
      a->fd = -1;
    }

  va = pm->base + (vec_len (pm->pages) << pm->log2_page_sz);
  if (mmap (va, n_pages << pm->log2_page_sz, PROT_READ | PROT_WRITE,
	    mmap_flags, a->fd, 0) == MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      goto error;
    }

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if the request is for numa node 0,
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
	clib_error_return_unix (0, "page allocated on wrong node, numa node "
				"%u status %d", numa_node, status) :
	clib_error_return (0, "page allocated on wrong node, numa node "
			   "%u status %d", numa_node, status);

      /* unmap & re-reserve */
      munmap (va, n_pages << pm->log2_page_sz);
      mmap (va, n_pages << pm->log2_page_sz, PROT_NONE,
	    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      goto error;
    }
  memset (va, 0, n_pages << pm->log2_page_sz);
  sys_page_size = sysconf (_SC_PAGESIZE);
  pagemap_fd = open ((char *) "/proc/self/pagemap", O_RDONLY);

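  /* for each mapped page: add a tracking entry, then look up its physical
     address via /proc/self/pagemap (one u64 entry per system page; bit 63 is
     the "page present" flag and bits 0-54 hold the page frame number) */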
  for (i = 0; i < n_pages; i++)
    {
      uword page_va = pointer_to_uword ((u8 *) va + (i << pm->log2_page_sz));
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;

      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;

      seek = (page_va / sys_page_size) * sizeof (pa);
      if (pagemap_fd != -1 &&
	  lseek (pagemap_fd, seek, SEEK_SET) == seek &&
	  read (pagemap_fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pp->pa = (pa & pow2_mask (55)) * sys_page_size;
	}
      vec_add1_aligned (pm->va_pa_diffs, pp->pa ? page_va - pp->pa : 0,
			CLIB_CACHE_LINE_BYTES);
    }

  if (pagemap_fd != -1)
    close (pagemap_fd);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

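/* Create a named arena sized to a whole number of pages and backed by a
 * hugetlb fd mapped MAP_SHARED, so the memory can later be shared with other
 * processes through that fd. */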
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages = round_pow2 (size, 1 << pm->log2_page_sz) >> pm->log2_page_sz;

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_page_sz = pm->log2_page_sz;

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + (pp->index << pm->log2_page_sz);
}

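/* Common allocation path: resolve the target arena (creating a per-numa
 * default arena on first use), express size and alignment in blocks, try to
 * carve a chunk out of the arena's existing pages, and grow non-shared
 * arenas by one page when nothing fits. */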
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }

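  /* sizes and alignments are tracked in fixed-size blocks of
     PMALLOC_BLOCK_SZ bytes, so round the request up to whole blocks */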
  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
    {
      pp = vec_elt_at_index (pm->pages, *page_index);
      void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
					numa_node);

      if (rv)
	return rv;
    }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}

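/* Free a previously returned VA: find its chunk through the VA hash, mark it
 * unused, and coalesce it with adjacent free chunks in the same page to limit
 * fragmentation. */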
void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (c->next != ~0 && get_chunk (pp, c->next)->used == 0)
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (c->prev != ~0 && get_chunk (pp, c->prev)->used == 0)
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

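/* Format one page: totals first, then (at verbosity >= 2) a per-chunk table
 * walked from first_chunk_index via the chunk next links. */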
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  s = format (s, "page %u: phys-addr %p ", pp->index, pp->pa);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u pagesize %uKB",
	      vec_len (pm->pages), pm->max_pages,
	      1 << (pm->log2_page_sz - 10));

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u numa-node %u",
		  format_white_space, indent + 2,
		  a->name, vec_len (a->page_indices), a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    });
  /* *INDENT-ON* */

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */