/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/clib.h>
#include <vppinfra/mem.h>
#include <vppinfra/lock.h>
#include <vppinfra/time.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
#include <vppinfra/linux/sysfs.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL   0x0001	/* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002	/* prevent file from shrinking */
#define F_SEAL_GROW   0x0004	/* prevent file from growing */
#define F_SEAL_WRITE  0x0008	/* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif

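/* Trivial test-and-set spinlock protecting the doubly-linked list of
 * clib_mem_vm_map_hdr_t headers (mm->first_map / mm->last_map) against
 * concurrent clib_mem_vm_map_internal() / clib_mem_vm_unmap() calls. */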
static void
map_lock ()
{
  while (clib_atomic_test_and_set (&clib_mem_main.map_lock))
    CLIB_PAUSE ();
}

static void
map_unlock ()
{
  clib_atomic_release (&clib_mem_main.map_lock);
}

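/* Return the default hugepage size in bytes, parsed from the
 * "Hugepagesize:" line of /proc/meminfo and cached after the first call.
 * Falls back to the regular page size when hugepages are not reported. */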
__clib_export uword
clib_mem_get_default_hugepage_size (void)
{
  unformat_input_t input;
  static u32 size = 0;
  int fd;

  if (size)
    goto done;

  /*
   * If the kernel doesn't support hugepages, /proc/meminfo won't
   * say anything about it. Use the regular page size as a default.
   */
  size = clib_mem_get_page_size () / 1024;

  if ((fd = open ("/proc/meminfo", 0)) == -1)
    return 0;

  unformat_init_clib_file (&input, fd);

  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "Hugepagesize:%_%u kB", &size))
	;
      else
	unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);
done:
  return 1024ULL * size;
}

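/* Fallback for kernels where memfd_create (MFD_HUGETLB) is unavailable:
 * scan /proc/meminfo directly and return log2 of the default hugepage
 * size, or CLIB_MEM_PAGE_SZ_UNKNOWN when it cannot be determined. */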
static clib_mem_page_sz_t
legacy_get_log2_default_hugepage_size (void)
{
  clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
  FILE *fp;
  char tmp[33] = { };

  if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
    return CLIB_MEM_PAGE_SZ_UNKNOWN;

  while (fscanf (fp, "%32s", tmp) > 0)
    if (strncmp ("Hugepagesize:", tmp, 13) == 0)
      {
	u32 size;
	if (fscanf (fp, "%u", &size) > 0)
	  log2_page_size = 10 + min_log2 (size);
	break;
      }

  fclose (fp);
  return log2_page_size;
}

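/* One-time initialization of clib_mem_main: record the system page size,
 * probe the default hugepage size (memfd_create on kernels 4.14+,
 * /proc/meminfo otherwise) and build the bitmap of usable NUMA nodes by
 * attempting to move a locked scratch page to each candidate node with
 * move_pages(). */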
void
clib_mem_main_init ()
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword page_size;
  void *va;
  int fd;

  if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    return;

  /* system page size */
  page_size = sysconf (_SC_PAGESIZE);
  mm->log2_page_sz = min_log2 (page_size);

  /* default system hugepage size */
  if ((fd = syscall (__NR_memfd_create, "test", MFD_HUGETLB)) != -1)
    {
      mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
      close (fd);
    }
  else	/* likely kernel older than 4.14 */
    mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();

  /* numa nodes */
  va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
	     MAP_ANONYMOUS, -1, 0);
  if (va == MAP_FAILED)
    return;

  if (mlock (va, page_size))
    goto done;

  for (int i = 0; i < CLIB_MAX_NUMAS; i++)
    {
      int status;
      if (syscall (__NR_move_pages, 0, 1, &va, &i, &status, 0) == 0)
	mm->numa_node_bitmap |= 1ULL << i;
    }

done:
  munmap (va, page_size);
}

__clib_export u64
clib_mem_get_fd_page_size (int fd)
{
  struct stat st = { 0 };
  if (fstat (fd, &st) == -1)
    return 0;
  return st.st_blksize;
}

__clib_export clib_mem_page_sz_t
clib_mem_get_fd_log2_page_size (int fd)
{
  uword page_size = clib_mem_get_fd_page_size (fd);
  return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
}

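/* Add a pseudo-random, page-aligned offset (derived from the CPU time
 * stamp counter) to a requested virtual address; larger page sizes get
 * fewer random bits so the total shift stays bounded. */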
__clib_export void
clib_mem_vm_randomize_va (uword * requested_va,
			  clib_mem_page_sz_t log2_page_size)
{
  u8 bit_mask = 15;

  if (log2_page_size <= 12)
    bit_mask = 15;
  else if (log2_page_size > 12 && log2_page_size <= 16)
    bit_mask = 3;
  else
    bit_mask = 0;

  *requested_va +=
    (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
}

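/* Emulate memfd_create (MFD_HUGETLB) on kernels older than 4.14: mount a
 * private hugetlbfs instance in a temporary directory, create a file on
 * it, then lazily unmount and remove the directory so that only the open
 * file descriptor keeps the backing store alive. */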
static int
legacy_memfd_create (u8 * name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd = -1;
  char *mount_dir;
  u8 *temp;
  u8 *filename;

  /*
   * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
   * it must not be a string constant, but should be declared as
   * a character array.
   */
  temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);

  /* create mount directory */
  if ((mount_dir = mkdtemp ((char *) temp)) == 0)
    {
      vec_free (temp);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mkdtemp");
      return CLIB_MEM_ERROR;
    }

  if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
    {
      vec_free (temp);
      rmdir ((char *) mount_dir);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mount");
      return CLIB_MEM_ERROR;
    }

  filename = format (0, "%s/%s%c", mount_dir, name, 0);

  if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "open");
    }

  umount2 ((char *) mount_dir, MNT_DETACH);
  rmdir ((char *) mount_dir);
  vec_free (filename);
  vec_free (temp);

  return fd;
}

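/* Create an anonymous memory file descriptor for the requested page size
 * using memfd_create(); default-page-size regions are sealed against
 * shrinking, hugepage regions fall back to legacy_memfd_create() on old
 * kernels. Returns the fd, or CLIB_MEM_ERROR on failure. Usage sketch
 * (illustrative only, segment name is hypothetical):
 *
 *   int fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT_HUGE,
 *                                   "%s", "my-segment");
 *   if (fd == CLIB_MEM_ERROR)
 *     ... details are in clib_mem_main.error ...
 */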
__clib_export int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    _vec_len (s) = 249;
  vec_add1 (s, 0);

  /* memfd_create was introduced in kernel 3.17, we don't support older
   * kernels */
  fd = syscall (__NR_memfd_create, (char *) s, memfd_flags);

  /* kernel versions < 4.14 do not support memfd_create for huge pages */
  if (fd == -1 && errno == EINVAL &&
      log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    {
      fd = legacy_memfd_create (s);
    }
  else if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}

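/* Reserve a PROT_NONE region of virtual address space aligned to the
 * requested page size, with one extra system page immediately below the
 * returned address (later used for the map header). Returns the aligned
 * start address, or ~0 on failure. */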
uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing an existing map */
      if (start & pow2_mask (log2_page_sz))
	return ~0;

      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that we get a reservation aligned to page_size we need to
   * request one additional page, as mmap will return an address which is
   * aligned only to the system page size */
  base = mmap (0, size + pagesize, PROT_NONE,
	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of the allocation */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of the allocation */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}

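/* Walk the global list of map headers: pass 0 to get the first header, or
 * a previously returned header to get the next one. The previous header
 * page is re-protected to PROT_NONE and the returned one is made
 * readable. */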
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *next;
  if (hdr == 0)
    {
      hdr = mm->first_map;
      if (hdr)
	mprotect (hdr, sys_page_sz, PROT_READ);
      return hdr;
    }
  next = hdr->next;
  mprotect (hdr, sys_page_sz, PROT_NONE);
  if (next)
    mprotect (next, sys_page_sz, PROT_READ);
  return next;
}

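/* Map anonymous or fd-backed memory at an optional base address. The
 * region is reserved with clib_mem_vm_reserve(), mapped with the page
 * size implied by the fd (or the requested one) and mlock()ed when huge
 * pages are used. A clib_mem_vm_map_hdr_t describing the mapping is
 * stored in the extra system page just below the returned base (kept
 * PROT_NONE) and linked into mm->first_map / mm->last_map. Returns the
 * mapped base, or CLIB_MEM_VM_MAP_FAILED. */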
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_sz = mm->log2_default_hugepage_sz;
	  is_huge = 1;
	  break;
	default:
	  mmap_flags |= MAP_HUGETLB;
	  mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
	  is_huge = 1;
	}
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1ULL << log2_page_sz);

  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  map_lock ();

  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  CLIB_MEM_UNPOISON (hdr, sys_page_sz);
  hdr->next = 0;
  hdr->prev = mm->last_map;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  mm->last_map = hdr;

  map_unlock ();

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  CLIB_MEM_UNPOISON (base, size);
  return base;
}

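/* Unmap a region previously returned by clib_mem_vm_map_internal(): the
 * header page just below the base address is used to find the mapping
 * size, the mapping is released and the header is unlinked from the
 * global list. */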
__clib_export int
clib_mem_vm_unmap (void *base)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;

  map_lock ();
  if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
    goto out;

  size = hdr->num_pages << hdr->log2_page_sz;
  if (munmap ((void *) hdr->base_addr, size) != 0)
    goto out;

  if (hdr->next)
    {
      mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->next->prev = hdr->prev;
      mprotect (hdr->next, sys_page_sz, PROT_NONE);
    }
  else
    mm->last_map = hdr->prev;

  if (hdr->prev)
    {
      mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->prev->next = hdr->next;
      mprotect (hdr->prev, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr->next;

  map_unlock ();

  if (munmap (hdr, sys_page_sz) != 0)
    return CLIB_MEM_ERROR;

  return 0;
out:
  map_unlock ();
  return CLIB_MEM_ERROR;
}

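/* Collect per-NUMA-node residency statistics for n_pages pages starting
 * at 'start', using move_pages() with a null nodes argument so the kernel
 * only reports the node of each page without moving anything. */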
__clib_export void
clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			 uword n_pages, clib_mem_page_stats_t * stats)
{
  int i, *status = 0;
  void **ptr = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  vec_validate (status, n_pages - 1);
  vec_validate (ptr, n_pages - 1);

  for (i = 0; i < n_pages; i++)
    ptr[i] = start + (i << log2_page_size);

  clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
  stats->total = n_pages;
  stats->log2_page_sz = log2_page_size;

  if (syscall (__NR_move_pages, 0, n_pages, ptr, 0, status, 0) != 0)
    {
      stats->unknown = n_pages;
      goto done;
    }

  for (i = 0; i < n_pages; i++)
    {
      if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
	{
	  stats->mapped++;
	  stats->per_numa[status[i]]++;
	}
      else if (status[i] == -EFAULT)
	stats->not_mapped++;
      else
	stats->unknown++;
    }

done:
  vec_free (status);
  vec_free (ptr);
}

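/* Translate virtual addresses into physical addresses by reading
 * /proc/self/pagemap: bit 63 of each entry marks the page as present and
 * bits 0-54 hold the page frame number. Returns a vector of physical
 * addresses, or 0 if any page cannot be resolved. */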
__clib_export u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
		       int n_pages)
{
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      u64 seek, pagemap = 0;
      uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
      seek = ((u64) vaddr / pagesize) * sizeof (u64);
      if (lseek (fd, seek, SEEK_SET) != seek)
	goto done;

      if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
	goto done;

      if ((pagemap & (1ULL << 63)) == 0)
	goto done;

      pagemap &= pow2_mask (55);
      vec_add1 (r, pagemap * pagesize);
    }

done:
  close (fd);
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}

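/* Bind (or prefer) subsequent memory allocations of the calling thread to
 * the given NUMA node via set_mempolicy(); 'force' selects MPOL_BIND
 * instead of MPOL_PREFERRED. Returns 0 on success or CLIB_MEM_ERROR. */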
__clib_export int
clib_mem_set_numa_affinity (u8 numa_node, int force)
{
  clib_mem_main_t *mm = &clib_mem_main;
  long unsigned int mask[16] = { 0 };
  int mask_len = sizeof (mask) * 8 + 1;

  /* no numa support */
  if (mm->numa_node_bitmap == 0)
    {
      if (numa_node)
	{
	  vec_reset_length (mm->error);
	  mm->error = clib_error_return (mm->error, "%s: numa not supported",
					 (char *) __func__);
	  return CLIB_MEM_ERROR;
	}
      else
	return 0;
    }

  mask[0] = 1 << numa_node;

  if (syscall (__NR_set_mempolicy, force ? MPOL_BIND : MPOL_PREFERRED, mask,
	       mask_len))
    goto error;

  vec_reset_length (mm->error);
  return 0;

error:
  vec_reset_length (mm->error);
  mm->error = clib_error_return_unix (mm->error, (char *) __func__);
  return CLIB_MEM_ERROR;
}

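/* Reset the calling thread's memory policy to the kernel default
 * (MPOL_DEFAULT). */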
__clib_export int
clib_mem_set_default_numa_affinity ()
{
  clib_mem_main_t *mm = &clib_mem_main;

  if (syscall (__NR_set_mempolicy, MPOL_DEFAULT, 0, 0))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, (char *) __func__);
      return CLIB_MEM_ERROR;
    }
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */