blob: 76195a21118aa248b3e186db82bcd8b62e130678 [file] [log] [blame]
Damjan Marion01914ce2017-09-14 19:04:50 +02001/*
2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#define _GNU_SOURCE
17#include <stdlib.h>
18#include <sys/types.h>
19#include <sys/stat.h>
20#include <unistd.h>
21#include <sys/mount.h>
22#include <sys/mman.h>
23#include <fcntl.h>
24#include <linux/mempolicy.h>
25#include <linux/memfd.h>
26
27#include <vppinfra/clib.h>
28#include <vppinfra/mem.h>
Florin Corasd3e83a92018-01-16 02:40:18 -080029#include <vppinfra/time.h>
Damjan Marion01914ce2017-09-14 19:04:50 +020030#include <vppinfra/format.h>
31#include <vppinfra/clib_error.h>
32#include <vppinfra/linux/syscall.h>
33#include <vppinfra/linux/sysfs.h>
34
35#ifndef F_LINUX_SPECIFIC_BASE
36#define F_LINUX_SPECIFIC_BASE 1024
37#endif
38
39#ifndef F_ADD_SEALS
40#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
41#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
42
43#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
44#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
45#define F_SEAL_GROW 0x0004 /* prevent file from growing */
46#define F_SEAL_WRITE 0x0008 /* prevent writes */
47#endif
48
Damjan Marionc63e2a42020-09-16 21:36:00 +020049#ifndef MFD_HUGETLB
50#define MFD_HUGETLB 0x0004U
51#endif
Damjan Marion9787f5f2018-10-24 12:56:32 +020052
Damjan Marion6bfd0762020-09-11 22:16:53 +020053#ifndef MAP_HUGE_SHIFT
54#define MAP_HUGE_SHIFT 26
55#endif
56
Damjan Marionbdbb0c52020-09-17 10:40:44 +020057#ifndef MFD_HUGE_SHIFT
58#define MFD_HUGE_SHIFT 26
59#endif
60
Damjan Marion6bfd0762020-09-11 22:16:53 +020061#ifndef MAP_FIXED_NOREPLACE
62#define MAP_FIXED_NOREPLACE 0x100000
63#endif
Damjan Marion9787f5f2018-10-24 12:56:32 +020064
65uword
66clib_mem_get_default_hugepage_size (void)
67{
68 unformat_input_t input;
69 static u32 size = 0;
70 int fd;
71
72 if (size)
73 goto done;
74
Dave Barach036343b2019-01-01 09:45:08 -050075 /*
76 * If the kernel doesn't support hugepages, /proc/meminfo won't
77 * say anything about it. Use the regular page size as a default.
78 */
79 size = clib_mem_get_page_size () / 1024;
80
Damjan Marion9787f5f2018-10-24 12:56:32 +020081 if ((fd = open ("/proc/meminfo", 0)) == -1)
82 return 0;
83
84 unformat_init_clib_file (&input, fd);
85
86 while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
87 {
88 if (unformat (&input, "Hugepagesize:%_%u kB", &size))
89 ;
90 else
91 unformat_skip_line (&input);
92 }
93 unformat_free (&input);
94 close (fd);
95done:
96 return 1024ULL * size;
97}
98
Damjan Marionc63e2a42020-09-16 21:36:00 +020099static clib_mem_page_sz_t
100legacy_get_log2_default_hugepage_size (void)
101{
102 clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
103 FILE *fp;
104 char tmp[33] = { };
105
106 if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
107 return CLIB_MEM_PAGE_SZ_UNKNOWN;
108
109 while (fscanf (fp, "%32s", tmp) > 0)
110 if (strncmp ("Hugepagesize:", tmp, 13) == 0)
111 {
112 u32 size;
113 if (fscanf (fp, "%u", &size) > 0)
114 log2_page_size = 10 + min_log2 (size);
115 break;
116 }
117
118 fclose (fp);
119 return log2_page_size;
120}
121
122void
123clib_mem_main_init ()
124{
125 clib_mem_main_t *mm = &clib_mem_main;
126 uword page_size;
127 void *va;
128 int fd;
129
130 if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
131 return;
132
133 /* system page size */
134 page_size = sysconf (_SC_PAGESIZE);
135 mm->log2_page_sz = min_log2 (page_size);
136
137 /* default system hugeppage size */
138 if ((fd = memfd_create ("test", MFD_HUGETLB)) != -1)
139 {
140 mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
141 close (fd);
142 }
143 else /* likely kernel older than 4.14 */
144 mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();
145
146 /* numa nodes */
147 va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
148 MAP_ANONYMOUS, -1, 0);
149 if (va == MAP_FAILED)
150 return;
151
152 if (mlock (va, page_size))
153 goto done;
154
155 for (int i = 0; i < CLIB_MAX_NUMAS; i++)
156 {
157 int status;
158 if (move_pages (0, 1, &va, &i, &status, 0) == 0)
159 mm->numa_node_bitmap |= 1ULL << i;
160 }
161
162done:
163 munmap (va, page_size);
164}
165
Dave Barach9466c452018-08-24 17:21:14 -0400166u64
Damjan Marion567e61d2018-10-24 17:08:26 +0200167clib_mem_get_fd_page_size (int fd)
Damjan Marion01914ce2017-09-14 19:04:50 +0200168{
169 struct stat st = { 0 };
Chris Lukeb2bcad62017-09-18 08:51:22 -0400170 if (fstat (fd, &st) == -1)
Damjan Marion01914ce2017-09-14 19:04:50 +0200171 return 0;
Florin Corasd3e83a92018-01-16 02:40:18 -0800172 return st.st_blksize;
173}
174
Damjan Marion6bfd0762020-09-11 22:16:53 +0200175clib_mem_page_sz_t
Damjan Marion567e61d2018-10-24 17:08:26 +0200176clib_mem_get_fd_log2_page_size (int fd)
Florin Corasd3e83a92018-01-16 02:40:18 -0800177{
Damjan Marion6bfd0762020-09-11 22:16:53 +0200178 uword page_size = clib_mem_get_fd_page_size (fd);
179 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
Florin Corasd3e83a92018-01-16 02:40:18 -0800180}
181
Florin Corasb384b542018-01-15 01:08:33 -0800182void
Damjan Marionb5095042020-09-11 22:13:46 +0200183clib_mem_vm_randomize_va (uword * requested_va,
184 clib_mem_page_sz_t log2_page_size)
Florin Corasd3e83a92018-01-16 02:40:18 -0800185{
186 u8 bit_mask = 15;
187
188 if (log2_page_size <= 12)
189 bit_mask = 15;
190 else if (log2_page_size > 12 && log2_page_size <= 16)
191 bit_mask = 3;
192 else
193 bit_mask = 0;
194
Haiyang Tana5ab5032018-10-15 06:17:55 -0700195 *requested_va +=
196 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
Damjan Marion01914ce2017-09-14 19:04:50 +0200197}
198
Damjan Marion1636b162018-10-19 12:54:42 +0200199clib_error_t *
Damjan Marion01914ce2017-09-14 19:04:50 +0200200clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
201{
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200202 clib_mem_main_t *mm = &clib_mem_main;
Damjan Marion01914ce2017-09-14 19:04:50 +0200203 int fd = -1;
204 clib_error_t *err = 0;
205 void *addr = 0;
206 u8 *filename = 0;
Damjan Marion7b185362018-03-04 16:41:35 +0100207 int mmap_flags = 0;
Damjan Marion01914ce2017-09-14 19:04:50 +0200208 int log2_page_size;
209 int n_pages;
210 int old_mpol = -1;
Dave Barach9466c452018-08-24 17:21:14 -0400211 long unsigned int old_mask[16] = { 0 };
Damjan Marion01914ce2017-09-14 19:04:50 +0200212
213 /* save old numa mem policy if needed */
214 if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
215 {
216 int rv;
Damjan Marion2e172ea2018-01-03 15:48:34 +0000217 rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1,
218 0, 0);
Damjan Marion01914ce2017-09-14 19:04:50 +0200219
220 if (rv == -1)
221 {
Damjan Marion2e172ea2018-01-03 15:48:34 +0000222 if (a->numa_node != 0 && (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
Damjan Marion01914ce2017-09-14 19:04:50 +0200223 {
224 err = clib_error_return_unix (0, "get_mempolicy");
225 goto error;
226 }
227 else
228 old_mpol = -1;
229 }
230 }
231
Damjan Marion7b185362018-03-04 16:41:35 +0100232 if (a->flags & CLIB_MEM_VM_F_LOCKED)
233 mmap_flags |= MAP_LOCKED;
234
Damjan Marion01914ce2017-09-14 19:04:50 +0200235 /* if we are creating shared segment, we need file descriptor */
236 if (a->flags & CLIB_MEM_VM_F_SHARED)
237 {
Damjan Marion7b185362018-03-04 16:41:35 +0100238 mmap_flags |= MAP_SHARED;
Damjan Marion01914ce2017-09-14 19:04:50 +0200239 /* if hugepages are needed we need to create mount point */
240 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
241 {
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200242 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;
Damjan Marion01914ce2017-09-14 19:04:50 +0200243 mmap_flags |= MAP_LOCKED;
244 }
245 else
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200246 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
247
248 if ((fd = clib_mem_vm_create_fd (log2_page_size, "%s", a->name)) == -1)
Damjan Marion01914ce2017-09-14 19:04:50 +0200249 {
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200250 err = clib_error_return (0, "%U", format_clib_error, mm->error);
251 goto error;
Damjan Marion01914ce2017-09-14 19:04:50 +0200252 }
Chris Luke879ace32017-09-26 13:15:16 -0400253
Damjan Marion567e61d2018-10-24 17:08:26 +0200254 log2_page_size = clib_mem_get_fd_log2_page_size (fd);
Chris Luke879ace32017-09-26 13:15:16 -0400255 if (log2_page_size == 0)
256 {
257 err = clib_error_return_unix (0, "cannot determine page size");
258 goto error;
259 }
Florin Corasd3e83a92018-01-16 02:40:18 -0800260
261 if (a->requested_va)
262 {
263 clib_mem_vm_randomize_va (&a->requested_va, log2_page_size);
264 mmap_flags |= MAP_FIXED;
265 }
Damjan Marion01914ce2017-09-14 19:04:50 +0200266 }
267 else /* not CLIB_MEM_VM_F_SHARED */
268 {
Damjan Marion7b185362018-03-04 16:41:35 +0100269 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
Damjan Marion01914ce2017-09-14 19:04:50 +0200270 if (a->flags & CLIB_MEM_VM_F_HUGETLB)
271 {
Damjan Marion7b185362018-03-04 16:41:35 +0100272 mmap_flags |= MAP_HUGETLB;
Damjan Marion01914ce2017-09-14 19:04:50 +0200273 log2_page_size = 21;
274 }
275 else
276 {
Damjan Marion01914ce2017-09-14 19:04:50 +0200277 log2_page_size = min_log2 (sysconf (_SC_PAGESIZE));
278 }
279 }
280
281 n_pages = ((a->size - 1) >> log2_page_size) + 1;
282
Damjan Marion01914ce2017-09-14 19:04:50 +0200283 if (a->flags & CLIB_MEM_VM_F_HUGETLB_PREALLOC)
284 {
Damjan Marion6f3f1cb2018-10-22 13:01:46 +0200285 err = clib_sysfs_prealloc_hugepages (a->numa_node, log2_page_size,
Damjan Marion01914ce2017-09-14 19:04:50 +0200286 n_pages);
287 if (err)
288 goto error;
289
290 }
291
292 if (fd != -1)
Lee Roberts45a09462018-03-07 19:47:00 -0700293 if ((ftruncate (fd, (u64) n_pages * (1 << log2_page_size))) == -1)
Damjan Marion01914ce2017-09-14 19:04:50 +0200294 {
295 err = clib_error_return_unix (0, "ftruncate");
296 goto error;
297 }
298
299 if (old_mpol != -1)
300 {
301 int rv;
Dave Barach9466c452018-08-24 17:21:14 -0400302 long unsigned int mask[16] = { 0 };
Damjan Marion01914ce2017-09-14 19:04:50 +0200303 mask[0] = 1 << a->numa_node;
304 rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
Damjan Marion915e3f12018-04-18 09:21:24 +0200305 if (rv == -1 && a->numa_node != 0 &&
306 (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
Damjan Marion01914ce2017-09-14 19:04:50 +0200307 {
308 err = clib_error_return_unix (0, "set_mempolicy");
309 goto error;
310 }
311 }
312
Florin Corasd3e83a92018-01-16 02:40:18 -0800313 addr = mmap (uword_to_pointer (a->requested_va, void *), a->size,
314 (PROT_READ | PROT_WRITE), mmap_flags, fd, 0);
Damjan Marion01914ce2017-09-14 19:04:50 +0200315 if (addr == MAP_FAILED)
316 {
317 err = clib_error_return_unix (0, "mmap");
318 goto error;
319 }
320
Damjan Marion2e172ea2018-01-03 15:48:34 +0000321 /* re-apply old numa memory policy */
Damjan Marion01914ce2017-09-14 19:04:50 +0200322 if (old_mpol != -1 &&
323 set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
324 {
325 err = clib_error_return_unix (0, "set_mempolicy");
326 goto error;
327 }
328
329 a->log2_page_size = log2_page_size;
330 a->n_pages = n_pages;
331 a->addr = addr;
332 a->fd = fd;
BenoƮt Ganne1557d9a2020-02-07 11:58:16 +0100333 CLIB_MEM_UNPOISON (addr, a->size);
Damjan Marion01914ce2017-09-14 19:04:50 +0200334 goto done;
335
336error:
337 if (fd != -1)
338 close (fd);
339
340done:
341 vec_free (filename);
342 return err;
343}
344
Haiyang Tan642829d2018-10-09 19:09:45 -0700345void
346clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a)
347{
348 if (a != 0)
349 {
Haiyang Tana5ab5032018-10-15 06:17:55 -0700350 clib_mem_vm_free (a->addr, 1ull << a->log2_page_size);
Haiyang Tan642829d2018-10-09 19:09:45 -0700351 if (a->fd != -1)
352 close (a->fd);
353 }
354}
355
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200356static int
357legacy_memfd_create (u8 * name)
358{
359 clib_mem_main_t *mm = &clib_mem_main;
360 int fd = -1;
361 char *mount_dir;
362 u8 *filename;
363
364 /* create mount directory */
365 if ((mount_dir = mkdtemp ("/tmp/hugepage_mount.XXXXXX")) == 0)
366 {
367 vec_reset_length (mm->error);
368 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
369 return -1;
370 }
371
372 if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
373 {
374 rmdir ((char *) mount_dir);
375 vec_reset_length (mm->error);
376 mm->error = clib_error_return_unix (mm->error, "mount");
377 return -1;
378 }
379
380 filename = format (0, "%s/%s%c", mount_dir, name, 0);
381
382 if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
383 {
384 vec_reset_length (mm->error);
385 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
386 }
387
388 umount2 ((char *) mount_dir, MNT_DETACH);
389 rmdir ((char *) mount_dir);
390 vec_free (filename);
391
392 return fd;
393}
394
395int
396clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
397{
398 clib_mem_main_t *mm = &clib_mem_main;
399 int fd;
400 unsigned int memfd_flags;
401 va_list va;
402 u8 *s = 0;
403
404 if (log2_page_size == mm->log2_page_sz)
405 log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
406
407 switch (log2_page_size)
408 {
409 case CLIB_MEM_PAGE_SZ_UNKNOWN:
410 return -1;
411 case CLIB_MEM_PAGE_SZ_DEFAULT:
412 memfd_flags = MFD_ALLOW_SEALING;
413 break;
414 case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
415 memfd_flags = MFD_HUGETLB;
416 break;
417 default:
418 memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
419 }
420
421 va_start (va, fmt);
422 s = va_format (0, fmt, &va);
423 va_end (va);
424
425 /* memfd_create maximum string size is 249 chars without trailing zero */
426 if (vec_len (s) > 249)
427 _vec_len (s) = 249;
428 vec_add1 (s, 0);
429
430 /* memfd_create introduced in kernel 3.17, we don't support older kernels */
431 fd = memfd_create ((char *) s, memfd_flags);
432
433 /* kernel versions < 4.14 does not support memfd_create for huge pages */
434 if (fd == -1 && errno == EINVAL &&
435 log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
436 {
437 fd = legacy_memfd_create (s);
438 }
439 else if (fd == -1)
440 {
441 vec_reset_length (mm->error);
442 mm->error = clib_error_return_unix (mm->error, "memfd_create");
443 vec_free (s);
444 return -1;
445 }
446
447 vec_free (s);
448
449 if ((memfd_flags & MFD_ALLOW_SEALING) &&
450 ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
451 {
452 vec_reset_length (mm->error);
453 mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
454 close (fd);
455 return -1;
456 }
457
458 return fd;
459}
460
Dave Barach16e4a4a2020-04-16 12:00:14 -0400461uword
Damjan Marionb5095042020-09-11 22:13:46 +0200462clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
Dave Barach16e4a4a2020-04-16 12:00:14 -0400463{
Damjan Marion6bfd0762020-09-11 22:16:53 +0200464 clib_mem_main_t *mm = &clib_mem_main;
465 uword pagesize = 1ULL << log2_page_sz;
466 uword sys_page_sz = 1ULL << mm->log2_page_sz;
467 uword n_bytes;
468 void *base = 0, *p;
Dave Barach16e4a4a2020-04-16 12:00:14 -0400469
470 size = round_pow2 (size, pagesize);
471
Damjan Marion6bfd0762020-09-11 22:16:53 +0200472 /* in adition of requested reservation, we also rserve one system page
473 * (typically 4K) adjacent to the start off reservation */
Dave Barach16e4a4a2020-04-16 12:00:14 -0400474
Damjan Marion6bfd0762020-09-11 22:16:53 +0200475 if (start)
Dave Barach16e4a4a2020-04-16 12:00:14 -0400476 {
Damjan Marion6bfd0762020-09-11 22:16:53 +0200477 /* start address is provided, so we just need to make sure we are not
478 * replacing existing map */
479 if (start & pow2_mask (log2_page_sz))
480 return ~0;
481
482 base = (void *) start - sys_page_sz;
483 base = mmap (base, size + sys_page_sz, PROT_NONE,
484 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
485 return (base == MAP_FAILED) ? ~0 : start;
Dave Barach16e4a4a2020-04-16 12:00:14 -0400486 }
487
Damjan Marion6bfd0762020-09-11 22:16:53 +0200488 /* to make sure that we get reservation aligned to page_size we need to
489 * request one additional page as mmap will return us address which is
490 * aligned only to system page size */
491 base = mmap (0, size + pagesize, PROT_NONE,
492 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
Dave Barach16e4a4a2020-04-16 12:00:14 -0400493
Damjan Marion6bfd0762020-09-11 22:16:53 +0200494 if (base == MAP_FAILED)
495 return ~0;
496
497 /* return additional space at the end of allocation */
498 p = base + size + pagesize;
499 n_bytes = (uword) p & pow2_mask (log2_page_sz);
500 if (n_bytes)
501 {
502 p -= n_bytes;
503 munmap (p, n_bytes);
504 }
505
506 /* return additional space at the start of allocation */
507 n_bytes = pagesize - sys_page_sz - n_bytes;
508 if (n_bytes)
509 {
510 munmap (base, n_bytes);
511 base += n_bytes;
512 }
513
514 return (uword) base + sys_page_sz;
Dave Barach16e4a4a2020-04-16 12:00:14 -0400515}
516
Damjan Marion6bfd0762020-09-11 22:16:53 +0200517clib_mem_vm_map_hdr_t *
518clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
519{
520 clib_mem_main_t *mm = &clib_mem_main;
521 uword sys_page_sz = 1 << mm->log2_page_sz;
522 clib_mem_vm_map_hdr_t *next;
523 if (hdr == 0)
524 {
525 hdr = mm->first_map;
526 if (hdr)
527 mprotect (hdr, sys_page_sz, PROT_READ);
528 return hdr;
529 }
530 next = hdr->next;
531 mprotect (hdr, sys_page_sz, PROT_NONE);
532 if (next)
533 mprotect (next, sys_page_sz, PROT_READ);
534 return next;
535}
536
/* Map memory (anonymous or fd-backed) at a freshly reserved virtual
 * address and record a map header in the system page immediately below
 * the mapping. Returns the mapped base address or CLIB_MEM_VM_MAP_FAILED. */
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1 << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      mmap_flags |= MAP_SHARED;
      /* page size of a file-backed mapping is dictated by the fd */
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_sz = mm->log2_default_hugepage_sz;
	  is_huge = 1;
	  break;
	default:
	  /* explicit hugepage size is encoded in the mmap flags */
	  mmap_flags |= MAP_HUGETLB;
	  mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
	  is_huge = 1;
	}
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1 << log2_page_sz);

  /* reserve VA space, including one system page below for the header */
  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  /* lock hugepages upfront so allocation failure shows up here rather
   * than as a SIGBUS on first touch */
  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  /* the header lives in the system page just below the mapping */
  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  /* link into the doubly-linked header list; headers are kept PROT_NONE
   * except while being modified */
  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  hdr->next = 0;
  hdr->prev = mm->last_map;
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  CLIB_MEM_UNPOISON (base, size);
  return base;
}
634
/* Unmap a region previously created by clib_mem_vm_map_internal, unlink
 * its header from the list and unmap the header page itself.
 * Returns 0 on success, -1 on failure. */
int
clib_mem_vm_unmap (void *base)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword size, sys_page_sz = 1 << mm->log2_page_sz;
  /* the map header lives in the system page directly below the mapping */
  clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;

  /* headers are normally PROT_NONE; make this one writable */
  if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
    return -1;

  size = hdr->num_pages << hdr->log2_page_sz;
  if (munmap ((void *) hdr->base_addr, size) != 0)
    return -1;

  /* unlink from the doubly-linked list, briefly unprotecting neighbors */
  if (hdr->next)
    {
      mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->next->prev = hdr->prev;
      mprotect (hdr->next, sys_page_sz, PROT_NONE);
    }
  else
    mm->last_map = hdr->prev;

  if (hdr->prev)
    {
      mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->prev->next = hdr->next;
      mprotect (hdr->prev, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr->next;

  if (munmap (hdr, sys_page_sz) != 0)
    return -1;

  return 0;
}
672
673void
674clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
675 uword n_pages, clib_mem_page_stats_t * stats)
676{
677 int i, *status = 0;
678 void **ptr = 0;
679
680 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
681
682 vec_validate (status, n_pages - 1);
683 vec_validate (ptr, n_pages - 1);
684
685 for (i = 0; i < n_pages; i++)
686 ptr[i] = start + (i << log2_page_size);
687
688 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
689
690 if (move_pages (0, n_pages, ptr, 0, status, 0) != 0)
691 {
692 stats->unknown = n_pages;
693 return;
694 }
695
696 for (i = 0; i < n_pages; i++)
697 {
698 if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
699 {
700 stats->mapped++;
701 stats->per_numa[status[i]]++;
702 }
703 else if (status[i] == -EFAULT)
704 stats->not_mapped++;
705 else
706 stats->unknown++;
707 }
708}
709
710
Damjan Marion01914ce2017-09-14 19:04:50 +0200711u64 *
Damjan Marion6bfd0762020-09-11 22:16:53 +0200712clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
713 int n_pages)
Damjan Marion01914ce2017-09-14 19:04:50 +0200714{
715 int pagesize = sysconf (_SC_PAGESIZE);
716 int fd;
717 int i;
718 u64 *r = 0;
719
Damjan Marion6bfd0762020-09-11 22:16:53 +0200720 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
721
Damjan Marion01914ce2017-09-14 19:04:50 +0200722 if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
723 return 0;
724
725 for (i = 0; i < n_pages; i++)
726 {
727 u64 seek, pagemap = 0;
728 uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
729 seek = ((u64) vaddr / pagesize) * sizeof (u64);
730 if (lseek (fd, seek, SEEK_SET) != seek)
731 goto done;
732
733 if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
734 goto done;
735
736 if ((pagemap & (1ULL << 63)) == 0)
737 goto done;
738
739 pagemap &= pow2_mask (55);
740 vec_add1 (r, pagemap * pagesize);
741 }
742
743done:
744 close (fd);
745 if (vec_len (r) != n_pages)
746 {
747 vec_free (r);
748 return 0;
749 }
750 return r;
751}
752
Florin Corasd3e83a92018-01-16 02:40:18 -0800753clib_error_t *
754clib_mem_vm_ext_map (clib_mem_vm_map_t * a)
755{
Florin Coras6fe89982020-02-07 23:28:41 +0000756 long unsigned int old_mask[16] = { 0 };
Florin Corasd3e83a92018-01-16 02:40:18 -0800757 int mmap_flags = MAP_SHARED;
Florin Coras6fe89982020-02-07 23:28:41 +0000758 clib_error_t *err = 0;
759 int old_mpol = -1;
Florin Corasd3e83a92018-01-16 02:40:18 -0800760 void *addr;
Florin Coras6fe89982020-02-07 23:28:41 +0000761 int rv;
762
763 if (a->numa_node)
764 {
765 rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0,
766 0);
767
768 if (rv == -1)
769 {
770 err = clib_error_return_unix (0, "get_mempolicy");
771 goto done;
772 }
773 }
Damjan Marion01914ce2017-09-14 19:04:50 +0200774
Florin Corasd3e83a92018-01-16 02:40:18 -0800775 if (a->requested_va)
776 mmap_flags |= MAP_FIXED;
777
Florin Coras6fe89982020-02-07 23:28:41 +0000778 if (old_mpol != -1)
779 {
780 long unsigned int mask[16] = { 0 };
781 mask[0] = 1 << a->numa_node;
782 rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
783 if (rv == -1)
784 {
785 err = clib_error_return_unix (0, "set_mempolicy");
786 goto done;
787 }
788 }
789
Florin Corasd3e83a92018-01-16 02:40:18 -0800790 addr = (void *) mmap (uword_to_pointer (a->requested_va, void *), a->size,
791 PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0);
792
793 if (addr == MAP_FAILED)
794 return clib_error_return_unix (0, "mmap");
795
Florin Coras6fe89982020-02-07 23:28:41 +0000796 /* re-apply old numa memory policy */
797 if (old_mpol != -1 &&
798 set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
799 {
800 err = clib_error_return_unix (0, "set_mempolicy");
801 goto done;
802 }
803
Florin Corasd3e83a92018-01-16 02:40:18 -0800804 a->addr = addr;
BenoƮt Ganne1557d9a2020-02-07 11:58:16 +0100805 CLIB_MEM_UNPOISON (addr, a->size);
Florin Coras6fe89982020-02-07 23:28:41 +0000806
807done:
808 return err;
Florin Corasd3e83a92018-01-16 02:40:18 -0800809}
Damjan Marion01914ce2017-09-14 19:04:50 +0200810
811/*
812 * fd.io coding-style-patch-verification: ON
813 *
814 * Local Variables:
815 * eval: (c-set-style "gnu")
816 * End:
817 */