/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/clib.h>
#include <vppinfra/mem.h>
#include <vppinfra/time.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>

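/*
 * Fallback definitions for fcntl file-sealing, memfd and mmap hugepage
 * constants that may be absent from older kernel / libc headers; the
 * values below mirror the Linux UAPI definitions.
 */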
#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL   0x0001	/* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002	/* prevent file from shrinking */
#define F_SEAL_GROW   0x0004	/* prevent file from growing */
#define F_SEAL_WRITE  0x0008	/* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif

uword
clib_mem_get_default_hugepage_size (void)
{
  unformat_input_t input;
  static u32 size = 0;
  int fd;

  if (size)
    goto done;

  /*
   * If the kernel doesn't support hugepages, /proc/meminfo won't
   * say anything about it. Use the regular page size as a default.
   */
  size = clib_mem_get_page_size () / 1024;

  if ((fd = open ("/proc/meminfo", 0)) == -1)
    return 0;

  unformat_init_clib_file (&input, fd);

  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "Hugepagesize:%_%u kB", &size))
	;
      else
	unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);
done:
  return 1024ULL * size;
}
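
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   uword hp_sz = clib_mem_get_default_hugepage_size ();
 *   uword n_hugepages = hp_sz ? round_pow2 (n_bytes, hp_sz) / hp_sz : 0;
 */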

static clib_mem_page_sz_t
legacy_get_log2_default_hugepage_size (void)
{
  clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
  FILE *fp;
  char tmp[33] = { };

  if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
    return CLIB_MEM_PAGE_SZ_UNKNOWN;

  while (fscanf (fp, "%32s", tmp) > 0)
    if (strncmp ("Hugepagesize:", tmp, 13) == 0)
      {
	u32 size;
	if (fscanf (fp, "%u", &size) > 0)
	  log2_page_size = 10 + min_log2 (size);
	break;
      }

  fclose (fp);
  return log2_page_size;
}

void
clib_mem_main_init ()
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword page_size;
  void *va;
  int fd;

  if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    return;

  /* system page size */
  page_size = sysconf (_SC_PAGESIZE);
  mm->log2_page_sz = min_log2 (page_size);

  /* default system hugepage size */
  if ((fd = memfd_create ("test", MFD_HUGETLB)) != -1)
    {
      mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
      close (fd);
    }
  else				/* likely kernel older than 4.14 */
    mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();

  /* numa nodes */
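  /*
   * Detection approach used below: lock a single anonymous page, then ask
   * move_pages () to place it on each candidate node in turn; a zero
   * return means that node exists, so it is recorded in numa_node_bitmap.
   */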
  va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
	     MAP_ANONYMOUS, -1, 0);
  if (va == MAP_FAILED)
    return;

  if (mlock (va, page_size))
    goto done;

  for (int i = 0; i < CLIB_MAX_NUMAS; i++)
    {
      int status;
      if (move_pages (0, 1, &va, &i, &status, 0) == 0)
	mm->numa_node_bitmap |= 1ULL << i;
    }

done:
  munmap (va, page_size);
}

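/*
 * For a file descriptor backed by hugetlbfs or memfd, fstat () reports the
 * backing page size in st_blksize; both helpers below rely on that to
 * discover the page size of an existing fd.
 */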
u64
clib_mem_get_fd_page_size (int fd)
{
  struct stat st = { 0 };
  if (fstat (fd, &st) == -1)
    return 0;
  return st.st_blksize;
}

clib_mem_page_sz_t
clib_mem_get_fd_log2_page_size (int fd)
{
  uword page_size = clib_mem_get_fd_page_size (fd);
  return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
}
181
Florin Corasb384b542018-01-15 01:08:33 -0800182void
Damjan Marionb5095042020-09-11 22:13:46 +0200183clib_mem_vm_randomize_va (uword * requested_va,
184 clib_mem_page_sz_t log2_page_size)
Florin Corasd3e83a92018-01-16 02:40:18 -0800185{
186 u8 bit_mask = 15;
187
188 if (log2_page_size <= 12)
189 bit_mask = 15;
190 else if (log2_page_size > 12 && log2_page_size <= 16)
191 bit_mask = 3;
192 else
193 bit_mask = 0;
194
Haiyang Tana5ab5032018-10-15 06:17:55 -0700195 *requested_va +=
196 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
Damjan Marion01914ce2017-09-14 19:04:50 +0200197}
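
/*
 * Effect of the mask above: for pages of 4 KiB or smaller the requested VA
 * is shifted by 0-15 pages, for 8-64 KiB pages by 0-3 pages, and for larger
 * pages (e.g. 2 MiB or 1 GiB hugepages) it is left unchanged.
 */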

clib_error_t *
clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd = -1;
  clib_error_t *err = 0;
  void *addr = 0;
  u8 *filename = 0;
  int mmap_flags = 0;
  int log2_page_size;
  int n_pages;
  int old_mpol = -1;
  long unsigned int old_mask[16] = { 0 };

  /* save old numa mem policy if needed */
  if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
    {
      int rv;
      rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1,
			  0, 0);

      if (rv == -1)
	{
	  if (a->numa_node != 0 && (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
	    {
	      err = clib_error_return_unix (0, "get_mempolicy");
	      goto error;
	    }
	  else
	    old_mpol = -1;
	}
    }

  if (a->flags & CLIB_MEM_VM_F_LOCKED)
    mmap_flags |= MAP_LOCKED;

  /* if we are creating shared segment, we need file descriptor */
  if (a->flags & CLIB_MEM_VM_F_SHARED)
    {
      mmap_flags |= MAP_SHARED;
      /* if hugepages are needed, request the default hugepage size */
      if (a->flags & CLIB_MEM_VM_F_HUGETLB)
	{
	  log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;
	  mmap_flags |= MAP_LOCKED;
	}
      else
	log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;

      if ((fd = clib_mem_vm_create_fd (log2_page_size, "%s", a->name)) == -1)
	{
	  err = clib_error_return (0, "%U", format_clib_error, mm->error);
	  goto error;
	}

      log2_page_size = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_size == 0)
	{
	  err = clib_error_return_unix (0, "cannot determine page size");
	  goto error;
	}

      if (a->requested_va)
	{
	  clib_mem_vm_randomize_va (&a->requested_va, log2_page_size);
	  mmap_flags |= MAP_FIXED;
	}
    }
  else				/* not CLIB_MEM_VM_F_SHARED */
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      if (a->flags & CLIB_MEM_VM_F_HUGETLB)
	{
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_size = 21;
	}
      else
	{
	  log2_page_size = min_log2 (sysconf (_SC_PAGESIZE));
	}
    }

  n_pages = ((a->size - 1) >> log2_page_size) + 1;

  if (a->flags & CLIB_MEM_VM_F_HUGETLB_PREALLOC)
    {
      err = clib_sysfs_prealloc_hugepages (a->numa_node, log2_page_size,
					   n_pages);
      if (err)
	goto error;

    }

  if (fd != -1)
    if ((ftruncate (fd, (u64) n_pages * (1 << log2_page_size))) == -1)
      {
	err = clib_error_return_unix (0, "ftruncate");
	goto error;
      }

  if (old_mpol != -1)
    {
      int rv;
      long unsigned int mask[16] = { 0 };
      mask[0] = 1 << a->numa_node;
      rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
      if (rv == -1 && a->numa_node != 0 &&
	  (a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
	{
	  err = clib_error_return_unix (0, "set_mempolicy");
	  goto error;
	}
    }

  addr = mmap (uword_to_pointer (a->requested_va, void *), a->size,
	       (PROT_READ | PROT_WRITE), mmap_flags, fd, 0);
  if (addr == MAP_FAILED)
    {
      err = clib_error_return_unix (0, "mmap");
      goto error;
    }

  /* re-apply old numa memory policy */
  if (old_mpol != -1 &&
      set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
    {
      err = clib_error_return_unix (0, "set_mempolicy");
      goto error;
    }

  a->log2_page_size = log2_page_size;
  a->n_pages = n_pages;
  a->addr = addr;
  a->fd = fd;
  CLIB_MEM_UNPOISON (addr, a->size);
  goto done;

error:
  if (fd != -1)
    close (fd);

done:
  vec_free (filename);
  return err;
}
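
/*
 * Minimal usage sketch (hypothetical caller, not part of this file),
 * assuming the clib_mem_vm_alloc_t fields referenced above:
 *
 *   clib_mem_vm_alloc_t alloc = { 0 };
 *   alloc.name = "example";
 *   alloc.size = 64 << 20;
 *   alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
 *   clib_error_t *err = clib_mem_vm_ext_alloc (&alloc);
 *   if (err == 0)
 *     {
 *       ... use alloc.addr, alloc.fd, alloc.n_pages ...
 *       clib_mem_vm_ext_free (&alloc);
 *     }
 */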

void
clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a)
{
  if (a != 0)
    {
      clib_mem_vm_free (a->addr, 1ull << a->log2_page_size);
      if (a->fd != -1)
	close (a->fd);
    }
}

static int
legacy_memfd_create (u8 * name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd = -1;
  char *mount_dir;
  u8 *temp;
  u8 *filename;

  /*
   * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
   * it must not be a string constant, but should be declared as
   * a character array.
   */
  temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);

  /* create mount directory */
  if ((mount_dir = mkdtemp ((char *) temp)) == 0)
    {
      vec_free (temp);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mkdtemp");
      return CLIB_MEM_ERROR;
    }

  if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
    {
      vec_free (temp);
      rmdir ((char *) mount_dir);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mount");
      return CLIB_MEM_ERROR;
    }

  filename = format (0, "%s/%s%c", mount_dir, name, 0);

  if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "open");
    }

  umount2 ((char *) mount_dir, MNT_DETACH);
  rmdir ((char *) mount_dir);
  vec_free (filename);
  vec_free (temp);

  return fd;
}

int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    _vec_len (s) = 249;
  vec_add1 (s, 0);

  /* memfd_create introduced in kernel 3.17, we don't support older kernels */
  fd = memfd_create ((char *) s, memfd_flags);

  /* kernel versions < 4.14 do not support memfd_create for huge pages */
  if (fd == -1 && errno == EINVAL &&
      log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    {
      fd = legacy_memfd_create (s);
    }
  else if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}
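
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * create a sealed, default-page-size shared memory fd and size it to
 * one system page.
 *
 *   int fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "demo-%d", 0);
 *   if (fd != CLIB_MEM_ERROR)
 *     {
 *       ftruncate (fd, clib_mem_get_page_size ());
 *       ...
 *       close (fd);
 *     }
 */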

uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */
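  /* e.g. with 2 MiB pages on a 4 KiB system page size, the address
   * returned below is 2 MiB-aligned and the 4 KiB page just below it
   * remains reserved for the map header */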

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing an existing map */
      if (start & pow2_mask (log2_page_sz))
	return ~0;

      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that the reservation is aligned to page_size we need to
   * request one additional page, as mmap will return an address aligned
   * only to the system page size */
  base = mmap (0, size + pagesize, PROT_NONE,
	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of the allocation */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of the allocation */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}

clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword sys_page_sz = 1 << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *next;
  if (hdr == 0)
    {
      hdr = mm->first_map;
      if (hdr)
	mprotect (hdr, sys_page_sz, PROT_READ);
      return hdr;
    }
  next = hdr->next;
  mprotect (hdr, sys_page_sz, PROT_NONE);
  if (next)
    mprotect (next, sys_page_sz, PROT_READ);
  return next;
}
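
/*
 * Each mapping created by clib_mem_vm_map_internal () below is preceded by
 * one system page holding a clib_mem_vm_map_hdr_t; the header pages are
 * kept PROT_NONE and are only unprotected while being read or updated.
 *
 * Minimal iteration sketch (hypothetical caller, not part of this file):
 *
 *   clib_mem_vm_map_hdr_t *hdr = 0;
 *   while ((hdr = clib_mem_vm_get_next_map_hdr (hdr)))
 *     {
 *       ... inspect hdr->name, hdr->base_addr, hdr->num_pages, hdr->fd ...
 *     }
 */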

void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1 << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_sz = mm->log2_default_hugepage_sz;
	  is_huge = 1;
	  break;
	default:
	  mmap_flags |= MAP_HUGETLB;
	  mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
	  is_huge = 1;
	}
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1 << log2_page_sz);

  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  hdr->next = 0;
  hdr->prev = mm->last_map;
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  CLIB_MEM_UNPOISON (base, size);
  return base;
}

int
clib_mem_vm_unmap (void *base)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword size, sys_page_sz = 1 << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;

  if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
    return CLIB_MEM_ERROR;

  size = hdr->num_pages << hdr->log2_page_sz;
  if (munmap ((void *) hdr->base_addr, size) != 0)
    return CLIB_MEM_ERROR;

  if (hdr->next)
    {
      mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->next->prev = hdr->prev;
      mprotect (hdr->next, sys_page_sz, PROT_NONE);
    }
  else
    mm->last_map = hdr->prev;

  if (hdr->prev)
    {
      mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->prev->next = hdr->next;
      mprotect (hdr->prev, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr->next;

  if (munmap (hdr, sys_page_sz) != 0)
    return CLIB_MEM_ERROR;

  return 0;
}

void
clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			 uword n_pages, clib_mem_page_stats_t * stats)
{
  int i, *status = 0;
  void **ptr = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  vec_validate (status, n_pages - 1);
  vec_validate (ptr, n_pages - 1);

  for (i = 0; i < n_pages; i++)
    ptr[i] = start + (i << log2_page_size);

  clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));

  if (move_pages (0, n_pages, ptr, 0, status, 0) != 0)
    {
      stats->unknown = n_pages;
      return;
    }

  for (i = 0; i < n_pages; i++)
    {
      if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
	{
	  stats->mapped++;
	  stats->per_numa[status[i]]++;
	}
      else if (status[i] == -EFAULT)
	stats->not_mapped++;
      else
	stats->unknown++;
    }
}
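
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   clib_mem_page_stats_t stats;
 *   clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, n_pages, &stats);
 *   ... stats.mapped, stats.not_mapped and stats.per_numa[] are now valid ...
 */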

u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
		       int n_pages)
{
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      u64 seek, pagemap = 0;
      uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
      seek = ((u64) vaddr / pagesize) * sizeof (u64);
      if (lseek (fd, seek, SEEK_SET) != seek)
	goto done;

      if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
	goto done;

      if ((pagemap & (1ULL << 63)) == 0)
	goto done;

      pagemap &= pow2_mask (55);
      vec_add1 (r, pagemap * pagesize);
    }

done:
  close (fd);
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}
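
/*
 * Minimal usage sketch (hypothetical caller, not part of this file); note
 * that on recent kernels reading PFNs from /proc/self/pagemap typically
 * requires CAP_SYS_ADMIN:
 *
 *   u64 *pa = clib_mem_vm_get_paddr (va, CLIB_MEM_PAGE_SZ_DEFAULT, n_pages);
 *   if (pa)
 *     {
 *       ... use pa[0 .. n_pages - 1] ...
 *       vec_free (pa);
 *     }
 */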

int
clib_mem_set_numa_affinity (u8 numa_node, int force)
{
  clib_mem_main_t *mm = &clib_mem_main;
  long unsigned int mask[16] = { 0 };
  int mask_len = sizeof (mask) * 8 + 1;

  /* no numa support */
  if (mm->numa_node_bitmap == 0)
    {
      if (numa_node)
	{
	  vec_reset_length (mm->error);
	  mm->error = clib_error_return (mm->error, "%s: numa not supported",
					 (char *) __func__);
	  return CLIB_MEM_ERROR;
	}
      else
	return 0;
    }

  mask[0] = 1 << numa_node;

  if (set_mempolicy (force ? MPOL_BIND : MPOL_PREFERRED, mask, mask_len))
    goto error;

  vec_reset_length (mm->error);
  return 0;

error:
  vec_reset_length (mm->error);
  mm->error = clib_error_return_unix (mm->error, (char *) __func__);
  return CLIB_MEM_ERROR;
}

int
clib_mem_set_default_numa_affinity ()
{
  clib_mem_main_t *mm = &clib_mem_main;

  if (set_mempolicy (MPOL_DEFAULT, 0, 0))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, (char *) __func__);
      return CLIB_MEM_ERROR;
    }
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */