blob: 1ef90da16f224ef3f8c3bd7f58d84894105d8500 [file] [log] [blame]
Damjan Marion01914ce2017-09-14 19:04:50 +02001/*
2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#define _GNU_SOURCE
17#include <stdlib.h>
18#include <sys/types.h>
19#include <sys/stat.h>
20#include <unistd.h>
21#include <sys/mount.h>
22#include <sys/mman.h>
23#include <fcntl.h>
24#include <linux/mempolicy.h>
25#include <linux/memfd.h>
26
27#include <vppinfra/clib.h>
28#include <vppinfra/mem.h>
Florin Corasd3e83a92018-01-16 02:40:18 -080029#include <vppinfra/time.h>
Damjan Marion01914ce2017-09-14 19:04:50 +020030#include <vppinfra/format.h>
31#include <vppinfra/clib_error.h>
32#include <vppinfra/linux/syscall.h>
33#include <vppinfra/linux/sysfs.h>
34
35#ifndef F_LINUX_SPECIFIC_BASE
36#define F_LINUX_SPECIFIC_BASE 1024
37#endif
38
39#ifndef F_ADD_SEALS
40#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
41#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
42
43#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
44#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
45#define F_SEAL_GROW 0x0004 /* prevent file from growing */
46#define F_SEAL_WRITE 0x0008 /* prevent writes */
47#endif
48
Damjan Marionc63e2a42020-09-16 21:36:00 +020049#ifndef MFD_HUGETLB
50#define MFD_HUGETLB 0x0004U
51#endif
Damjan Marion9787f5f2018-10-24 12:56:32 +020052
Damjan Marion6bfd0762020-09-11 22:16:53 +020053#ifndef MAP_HUGE_SHIFT
54#define MAP_HUGE_SHIFT 26
55#endif
56
Damjan Marionbdbb0c52020-09-17 10:40:44 +020057#ifndef MFD_HUGE_SHIFT
58#define MFD_HUGE_SHIFT 26
59#endif
60
Damjan Marion6bfd0762020-09-11 22:16:53 +020061#ifndef MAP_FIXED_NOREPLACE
62#define MAP_FIXED_NOREPLACE 0x100000
63#endif
Damjan Marion9787f5f2018-10-24 12:56:32 +020064
Damjan Mariondae1c7e2020-10-17 13:32:25 +020065__clib_export uword
Damjan Marion9787f5f2018-10-24 12:56:32 +020066clib_mem_get_default_hugepage_size (void)
67{
68 unformat_input_t input;
69 static u32 size = 0;
70 int fd;
71
72 if (size)
73 goto done;
74
Dave Barach036343b2019-01-01 09:45:08 -050075 /*
76 * If the kernel doesn't support hugepages, /proc/meminfo won't
77 * say anything about it. Use the regular page size as a default.
78 */
79 size = clib_mem_get_page_size () / 1024;
80
Damjan Marion9787f5f2018-10-24 12:56:32 +020081 if ((fd = open ("/proc/meminfo", 0)) == -1)
82 return 0;
83
84 unformat_init_clib_file (&input, fd);
85
86 while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
87 {
88 if (unformat (&input, "Hugepagesize:%_%u kB", &size))
89 ;
90 else
91 unformat_skip_line (&input);
92 }
93 unformat_free (&input);
94 close (fd);
95done:
96 return 1024ULL * size;
97}
98
Damjan Marionc63e2a42020-09-16 21:36:00 +020099static clib_mem_page_sz_t
100legacy_get_log2_default_hugepage_size (void)
101{
102 clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
103 FILE *fp;
104 char tmp[33] = { };
105
106 if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
107 return CLIB_MEM_PAGE_SZ_UNKNOWN;
108
109 while (fscanf (fp, "%32s", tmp) > 0)
110 if (strncmp ("Hugepagesize:", tmp, 13) == 0)
111 {
112 u32 size;
113 if (fscanf (fp, "%u", &size) > 0)
114 log2_page_size = 10 + min_log2 (size);
115 break;
116 }
117
118 fclose (fp);
119 return log2_page_size;
120}
121
/* One-time initialization of the clib memory subsystem: records the
 * system page size, discovers the default hugepage size, and builds a
 * bitmap of available NUMA nodes by probing where a locked page can be
 * placed with move_pages(). Safe to call repeatedly; subsequent calls
 * are no-ops. */
void
clib_mem_main_init ()
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword page_size;
  void *va;
  int fd;

  /* already initialized? */
  if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    return;

  /* system page size */
  page_size = sysconf (_SC_PAGESIZE);
  mm->log2_page_sz = min_log2 (page_size);

  /* default system hugepage size, probed via a hugetlb memfd */
  if ((fd = memfd_create ("test", MFD_HUGETLB)) != -1)
    {
      mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
      close (fd);
    }
  else /* likely kernel older than 4.14 */
    mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();

  /* numa nodes */
  va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
	     MAP_ANONYMOUS, -1, 0);
  if (va == MAP_FAILED)
    return;

  /* the probe page must be resident, otherwise move_pages () cannot
   * report/assign a node for it */
  if (mlock (va, page_size))
    goto done;

  for (int i = 0; i < CLIB_MAX_NUMAS; i++)
    {
      int status;
      /* moving the page to node i succeeds only when node i exists */
      if (move_pages (0, 1, &va, &i, &status, 0) == 0)
	mm->numa_node_bitmap |= 1ULL << i;
    }

done:
  munmap (va, page_size);
}
165
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200166__clib_export u64
Damjan Marion567e61d2018-10-24 17:08:26 +0200167clib_mem_get_fd_page_size (int fd)
Damjan Marion01914ce2017-09-14 19:04:50 +0200168{
169 struct stat st = { 0 };
Chris Lukeb2bcad62017-09-18 08:51:22 -0400170 if (fstat (fd, &st) == -1)
Damjan Marion01914ce2017-09-14 19:04:50 +0200171 return 0;
Florin Corasd3e83a92018-01-16 02:40:18 -0800172 return st.st_blksize;
173}
174
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200175__clib_export clib_mem_page_sz_t
Damjan Marion567e61d2018-10-24 17:08:26 +0200176clib_mem_get_fd_log2_page_size (int fd)
Florin Corasd3e83a92018-01-16 02:40:18 -0800177{
Damjan Marion6bfd0762020-09-11 22:16:53 +0200178 uword page_size = clib_mem_get_fd_page_size (fd);
179 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
Florin Corasd3e83a92018-01-16 02:40:18 -0800180}
181
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200182__clib_export void
Damjan Marionb5095042020-09-11 22:13:46 +0200183clib_mem_vm_randomize_va (uword * requested_va,
184 clib_mem_page_sz_t log2_page_size)
Florin Corasd3e83a92018-01-16 02:40:18 -0800185{
186 u8 bit_mask = 15;
187
188 if (log2_page_size <= 12)
189 bit_mask = 15;
190 else if (log2_page_size > 12 && log2_page_size <= 16)
191 bit_mask = 3;
192 else
193 bit_mask = 0;
194
Haiyang Tana5ab5032018-10-15 06:17:55 -0700195 *requested_va +=
196 (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
Damjan Marion01914ce2017-09-14 19:04:50 +0200197}
198
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200199static int
200legacy_memfd_create (u8 * name)
201{
202 clib_mem_main_t *mm = &clib_mem_main;
203 int fd = -1;
204 char *mount_dir;
Benoît Ganne2b92c702020-09-28 17:34:17 +0200205 u8 *temp;
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200206 u8 *filename;
207
Benoît Ganne2b92c702020-09-28 17:34:17 +0200208 /*
209 * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
210 * it must not be a string constant, but should be declared as
211 * a character array.
212 */
213 temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);
214
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200215 /* create mount directory */
Benoît Ganne2b92c702020-09-28 17:34:17 +0200216 if ((mount_dir = mkdtemp ((char *) temp)) == 0)
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200217 {
Benoît Ganne2b92c702020-09-28 17:34:17 +0200218 vec_free (temp);
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200219 vec_reset_length (mm->error);
220 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
Damjan Marion561ae5d2020-09-24 13:53:46 +0200221 return CLIB_MEM_ERROR;
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200222 }
223
224 if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
225 {
Benoît Ganne2b92c702020-09-28 17:34:17 +0200226 vec_free (temp);
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200227 rmdir ((char *) mount_dir);
228 vec_reset_length (mm->error);
229 mm->error = clib_error_return_unix (mm->error, "mount");
Damjan Marion561ae5d2020-09-24 13:53:46 +0200230 return CLIB_MEM_ERROR;
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200231 }
232
233 filename = format (0, "%s/%s%c", mount_dir, name, 0);
234
235 if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
236 {
237 vec_reset_length (mm->error);
238 mm->error = clib_error_return_unix (mm->error, "mkdtemp");
239 }
240
241 umount2 ((char *) mount_dir, MNT_DETACH);
242 rmdir ((char *) mount_dir);
243 vec_free (filename);
Benoît Ganne2b92c702020-09-28 17:34:17 +0200244 vec_free (temp);
Damjan Marionbdbb0c52020-09-17 10:40:44 +0200245
246 return fd;
247}
248
/* Create an anonymous memory fd (memfd) backed by pages of the given
 * size. fmt/... produce the memfd name. Regular-page fds are sealed
 * against shrinking; hugepage creation falls back to a hugetlbfs mount
 * on pre-4.14 kernels. Returns the fd, or CLIB_MEM_ERROR on failure
 * with mm->error set. */
__clib_export int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  /* normalize explicit sizes that match the system defaults onto the
   * symbolic DEFAULT / DEFAULT_HUGE values */
  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      /* explicit hugepage size is encoded in the upper flag bits */
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    _vec_len (s) = 249;
  vec_add1 (s, 0);

  /* memfd_create introduced in kernel 3.17, we don't support older kernels */
  fd = memfd_create ((char *) s, memfd_flags);

  /* kernel versions < 4.14 does not support memfd_create for huge pages */
  if (fd == -1 && errno == EINVAL &&
      log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    {
      fd = legacy_memfd_create (s);
    }
  else if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  /* seal regular-page fds so the file cannot be shrunk behind our back;
   * sealing is not requested (nor supported) on the hugetlb paths */
  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}
316
/* Reserve (PROT_NONE) a region of 'size' bytes aligned to the given page
 * size, plus one extra system page immediately below it (later used for
 * the map header). When 'start' is non-zero the reservation must land
 * exactly there without replacing existing mappings. Returns the usable
 * base address, or ~0 on failure. */
uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing existing map */
      if (start & pow2_mask (log2_page_sz))
	return ~0;

      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that we get reservation aligned to page_size we need to
   * request one additional page as mmap will return us address which is
   * aligned only to system page size */
  base = mmap (0, size + pagesize, PROT_NONE,
	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of allocation */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of allocation, keeping one
   * system page below the aligned base for the header */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}
372
/* Iterate over the linked list of map headers. Pass 0 to get the first
 * header, then the previous return value to advance. Headers normally
 * live in PROT_NONE pages; this function makes the returned header
 * readable and re-protects the one being left behind, so the caller
 * must walk the full chain (or stop) without keeping stale pointers. */
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *next;
  if (hdr == 0)
    {
      /* start of iteration: expose the first header, if any */
      hdr = mm->first_map;
      if (hdr)
	mprotect (hdr, sys_page_sz, PROT_READ);
      return hdr;
    }
  next = hdr->next;
  /* re-seal the header we are leaving, expose the next one */
  mprotect (hdr, sys_page_sz, PROT_NONE);
  if (next)
    mprotect (next, sys_page_sz, PROT_READ);
  return next;
}
392
/* Map 'size' bytes at (or near) 'base' with the requested page size,
 * either file-backed (fd != -1, MAP_SHARED) or anonymous. A hidden map
 * header is placed in the system page directly below the returned base
 * and linked into the global map list; the header page is kept
 * PROT_NONE when not in use. Hugepage mappings are mlock()ed to force
 * backing-page allocation up front. Returns the mapped base or
 * CLIB_MEM_VM_MAP_FAILED. */
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      /* file-backed mapping: page size is dictated by the fd itself */
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      /* normalize an explicit system-page-size request */
      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_sz = mm->log2_default_hugepage_sz;
	  is_huge = 1;
	  break;
	default:
	  /* explicit hugepage size is encoded in the mmap flag bits */
	  mmap_flags |= MAP_HUGETLB;
	  mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
	  is_huge = 1;
	}
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1ULL << log2_page_sz);

  /* reserve the region (plus one header page below it) first, then map
   * over the reservation with MAP_FIXED */
  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  /* for hugepages, force backing pages to be allocated now so failures
   * surface here instead of as SIGBUS later */
  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  /* place the map header in the reserved system page below base */
  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  /* append hdr to the global list; neighbor header pages must be made
   * writable for the pointer update, then sealed again */
  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  CLIB_MEM_UNPOISON (hdr, sys_page_sz);
  hdr->next = 0;
  hdr->prev = mm->last_map;
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  CLIB_MEM_UNPOISON (base, size);
  return base;
}
491
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200492__clib_export int
Damjan Marion6bfd0762020-09-11 22:16:53 +0200493clib_mem_vm_unmap (void *base)
494{
495 clib_mem_main_t *mm = &clib_mem_main;
Dave Barach27c35e32020-10-07 09:37:36 -0400496 uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
Damjan Marion6bfd0762020-09-11 22:16:53 +0200497 clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;;
498
499 if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
Damjan Marion561ae5d2020-09-24 13:53:46 +0200500 return CLIB_MEM_ERROR;
Damjan Marion6bfd0762020-09-11 22:16:53 +0200501
502 size = hdr->num_pages << hdr->log2_page_sz;
503 if (munmap ((void *) hdr->base_addr, size) != 0)
Damjan Marion561ae5d2020-09-24 13:53:46 +0200504 return CLIB_MEM_ERROR;
Damjan Marion6bfd0762020-09-11 22:16:53 +0200505
506 if (hdr->next)
507 {
508 mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
509 hdr->next->prev = hdr->prev;
510 mprotect (hdr->next, sys_page_sz, PROT_NONE);
511 }
512 else
513 mm->last_map = hdr->prev;
514
515 if (hdr->prev)
516 {
517 mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
518 hdr->prev->next = hdr->next;
519 mprotect (hdr->prev, sys_page_sz, PROT_NONE);
520 }
521 else
522 mm->first_map = hdr->next;
523
524 if (munmap (hdr, sys_page_sz) != 0)
Damjan Marion561ae5d2020-09-24 13:53:46 +0200525 return CLIB_MEM_ERROR;
Damjan Marion6bfd0762020-09-11 22:16:53 +0200526
527 return 0;
528}
529
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200530__clib_export void
Damjan Marion6bfd0762020-09-11 22:16:53 +0200531clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
532 uword n_pages, clib_mem_page_stats_t * stats)
533{
534 int i, *status = 0;
535 void **ptr = 0;
536
537 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
538
539 vec_validate (status, n_pages - 1);
540 vec_validate (ptr, n_pages - 1);
541
542 for (i = 0; i < n_pages; i++)
543 ptr[i] = start + (i << log2_page_size);
544
545 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
Damjan Marionbfa75d62020-10-06 17:46:06 +0200546 stats->total = n_pages;
547 stats->log2_page_sz = log2_page_size;
Damjan Marion6bfd0762020-09-11 22:16:53 +0200548
549 if (move_pages (0, n_pages, ptr, 0, status, 0) != 0)
550 {
551 stats->unknown = n_pages;
552 return;
553 }
554
555 for (i = 0; i < n_pages; i++)
556 {
557 if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
558 {
559 stats->mapped++;
560 stats->per_numa[status[i]]++;
561 }
562 else if (status[i] == -EFAULT)
563 stats->not_mapped++;
564 else
565 stats->unknown++;
566 }
567}
568
569
/* Translate 'n_pages' virtual pages starting at 'mem' into physical
 * addresses by reading /proc/self/pagemap. Returns a vector of n_pages
 * physical addresses (caller frees), or 0 if the pagemap cannot be read
 * or any page is not present. Requires sufficient privilege to read
 * meaningful PFNs from pagemap. */
__clib_export u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
		       int n_pages)
{
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      u64 seek, pagemap = 0;
      uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
      /* pagemap holds one u64 entry per system page */
      seek = ((u64) vaddr / pagesize) * sizeof (u64);
      if (lseek (fd, seek, SEEK_SET) != seek)
	goto done;

      if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
	goto done;

      /* bit 63 = page present (per kernel pagemap format) */
      if ((pagemap & (1ULL << 63)) == 0)
	goto done;

      /* bits 0-54 hold the page frame number */
      pagemap &= pow2_mask (55);
      vec_add1 (r, pagemap * pagesize);
    }

done:
  close (fd);
  /* all-or-nothing: a partial translation is discarded */
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}
611
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200612__clib_export int
Damjan Marion561ae5d2020-09-24 13:53:46 +0200613clib_mem_set_numa_affinity (u8 numa_node, int force)
614{
615 clib_mem_main_t *mm = &clib_mem_main;
616 long unsigned int mask[16] = { 0 };
617 int mask_len = sizeof (mask) * 8 + 1;
618
619 /* no numa support */
620 if (mm->numa_node_bitmap == 0)
621 {
622 if (numa_node)
623 {
624 vec_reset_length (mm->error);
625 mm->error = clib_error_return (mm->error, "%s: numa not supported",
626 (char *) __func__);
627 return CLIB_MEM_ERROR;
628 }
629 else
630 return 0;
631 }
632
633 mask[0] = 1 << numa_node;
634
635 if (set_mempolicy (force ? MPOL_BIND : MPOL_PREFERRED, mask, mask_len))
636 goto error;
637
638 vec_reset_length (mm->error);
639 return 0;
640
641error:
642 vec_reset_length (mm->error);
643 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
644 return CLIB_MEM_ERROR;
645}
646
Damjan Mariondae1c7e2020-10-17 13:32:25 +0200647__clib_export int
Damjan Marion561ae5d2020-09-24 13:53:46 +0200648clib_mem_set_default_numa_affinity ()
649{
650 clib_mem_main_t *mm = &clib_mem_main;
651
652 if (set_mempolicy (MPOL_DEFAULT, 0, 0))
653 {
654 vec_reset_length (mm->error);
655 mm->error = clib_error_return_unix (mm->error, (char *) __func__);
656 return CLIB_MEM_ERROR;
657 }
658 return 0;
659}
660
Damjan Marion01914ce2017-09-14 19:04:50 +0200661/*
662 * fd.io coding-style-patch-verification: ON
663 *
664 * Local Variables:
665 * eval: (c-set-style "gnu")
666 * End:
667 */