/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/clib.h>
#include <vppinfra/mem.h>
#include <vppinfra/lock.h>
#include <vppinfra/time.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
#include <vppinfra/linux/sysfs.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL   0x0001	/* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002	/* prevent file from shrinking */
#define F_SEAL_GROW   0x0004	/* prevent file from growing */
#define F_SEAL_WRITE  0x0008	/* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif

static void
map_lock ()
{
  while (clib_atomic_test_and_set (&clib_mem_main.map_lock))
    CLIB_PAUSE ();
}

static void
map_unlock ()
{
  clib_atomic_release (&clib_mem_main.map_lock);
}

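/* Fallback used when memfd_create (MFD_HUGETLB) is unavailable: parse the
 * "Hugepagesize:" line from /proc/meminfo. The value is reported in kB,
 * hence the "10 +" when converting it to a log2 byte size. */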
static clib_mem_page_sz_t
legacy_get_log2_default_hugepage_size (void)
{
  clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
  FILE *fp;
  char tmp[33] = { };

  if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
    return CLIB_MEM_PAGE_SZ_UNKNOWN;

  while (fscanf (fp, "%32s", tmp) > 0)
    if (strncmp ("Hugepagesize:", tmp, 13) == 0)
      {
	u32 size;
	if (fscanf (fp, "%u", &size) > 0)
	  log2_page_size = 10 + min_log2 (size);
	break;
      }

  fclose (fp);
  return log2_page_size;
}

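/* One-time initialization of clib_mem_main: discovers the system page size,
 * the default hugepage size (via a probe memfd, or /proc/meminfo on older
 * kernels) and the set of usable NUMA nodes (by querying move_pages() on a
 * locked scratch page). Subsequent calls return early. */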
void
clib_mem_main_init ()
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword page_size;
  void *va;
  int fd;

  if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    return;

  /* system page size */
  page_size = sysconf (_SC_PAGESIZE);
  mm->log2_page_sz = min_log2 (page_size);

  /* default system hugepage size */
  if ((fd = syscall (__NR_memfd_create, "test", MFD_HUGETLB)) != -1)
    {
      mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
      close (fd);
    }
  else /* likely kernel older than 4.14 */
    mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();

  mm->log2_sys_default_hugepage_sz = mm->log2_default_hugepage_sz;

  /* numa nodes */
  va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
	     MAP_ANONYMOUS, -1, 0);
  if (va == MAP_FAILED)
    return;

  if (mlock (va, page_size))
    goto done;

  for (int i = 0; i < CLIB_MAX_NUMAS; i++)
    {
      int status;
      if (syscall (__NR_move_pages, 0, 1, &va, &i, &status, 0) == 0)
	mm->numa_node_bitmap |= 1ULL << i;
    }

done:
  munmap (va, page_size);
}

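/* Page size backing a file descriptor, taken from fstat()'s st_blksize;
 * for hugetlbfs-backed files and huge memfds this reflects the huge page
 * size. Returns 0 on failure. */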
__clib_export u64
clib_mem_get_fd_page_size (int fd)
{
  struct stat st = { 0 };
  if (fstat (fd, &st) == -1)
    return 0;
  return st.st_blksize;
}

__clib_export clib_mem_page_sz_t
clib_mem_get_fd_log2_page_size (int fd)
{
  uword page_size = clib_mem_get_fd_page_size (fd);
  return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
}

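/* Add a small pseudo-random, page-aligned offset (derived from the CPU
 * timestamp counter) to a requested virtual address so mappings do not
 * always land at predictable addresses. Larger page sizes get fewer
 * random bits to keep the displacement bounded. */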
__clib_export void
clib_mem_vm_randomize_va (uword * requested_va,
			  clib_mem_page_sz_t log2_page_size)
{
  u8 bit_mask = 15;

  if (log2_page_size <= 12)
    bit_mask = 15;
  else if (log2_page_size > 12 && log2_page_size <= 16)
    bit_mask = 3;
  else
    bit_mask = 0;

  *requested_va +=
    (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
}

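/* Emulation of memfd_create (MFD_HUGETLB) for kernels older than 4.14:
 * mount a private hugetlbfs instance in a temporary directory, open a file
 * on it, then lazily unmount and remove the directory so only the open fd
 * keeps the file alive. */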
static int
legacy_memfd_create (u8 * name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd = -1;
  char *mount_dir;
  u8 *temp;
  u8 *filename;

  /*
   * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
   * it must not be a string constant, but should be declared as
   * a character array.
   */
  temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);

  /* create mount directory */
  if ((mount_dir = mkdtemp ((char *) temp)) == 0)
    {
      vec_free (temp);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mkdtemp");
      return CLIB_MEM_ERROR;
    }

  if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
    {
      vec_free (temp);
      rmdir ((char *) mount_dir);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mount");
      return CLIB_MEM_ERROR;
    }

  filename = format (0, "%s/%s%c", mount_dir, name, 0);

  if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "open");
    }

  umount2 ((char *) mount_dir, MNT_DETACH);
  rmdir ((char *) mount_dir);
  vec_free (filename);
  vec_free (temp);

  return fd;
}

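/* Create an anonymous memory fd suitable for a shareable segment. A caller
 * might, for example, request a default-hugepage backed fd (illustrative
 * only, the segment name is hypothetical):
 *
 *   int fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT_HUGE,
 *				     "my-segment-%u", 0);
 *
 * Returns the fd on success or CLIB_MEM_ERROR, with details recorded in
 * clib_mem_main.error. */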
__clib_export int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_sys_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    vec_set_len (s, 249);
  vec_add1 (s, 0);

  /* memfd_create introduced in kernel 3.17, we don't support older kernels */
  fd = syscall (__NR_memfd_create, (char *) s, memfd_flags);

  /* kernel versions < 4.14 do not support memfd_create for huge pages */
  if (fd == -1 && errno == EINVAL &&
      log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    {
      fd = legacy_memfd_create (s);
    }
  else if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}

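/* Reserve (PROT_NONE) a region of virtual address space aligned to the
 * requested page size, plus one extra system page immediately below it,
 * which clib_mem_vm_map_internal later uses for the map header. Returns
 * the usable, aligned base address, or ~0 on failure. */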
uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing an existing map */
      if (start & pow2_mask (log2_page_sz))
	return ~0;

      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that we get a reservation aligned to page_size we need to
   * request one additional page, as mmap will return an address which is
   * aligned only to the system page size */
  base = mmap (0, size + pagesize, PROT_NONE,
	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of allocation */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of allocation */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}

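/* Iterate over the linked list of map headers: pass 0 to get the first
 * header, or a previously returned header to get the next one. Headers are
 * kept PROT_NONE when not being inspected; this walker re-protects the one
 * it hands back as read-only. */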
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *next;
  if (hdr == 0)
    {
      hdr = mm->first_map;
      if (hdr)
	mprotect (hdr, sys_page_sz, PROT_READ);
      return hdr;
    }
  next = hdr->next;
  mprotect (hdr, sys_page_sz, PROT_NONE);
  if (next)
    mprotect (next, sys_page_sz, PROT_READ);
  return next;
}

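/* Core mmap wrapper: reserves an aligned region, maps it (file-backed and
 * shared when fd != -1, anonymous otherwise, optionally with MAP_HUGETLB),
 * mlock()s hugepage mappings so backing failures surface immediately, and
 * records the mapping in a header page placed just below the returned base
 * address. */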
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  mmap_flags |= MAP_HUGETLB;
	  log2_page_sz = mm->log2_default_hugepage_sz;
	  is_huge = 1;
	  break;
	default:
	  mmap_flags |= MAP_HUGETLB;
	  mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
	  is_huge = 1;
	}
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1ULL << log2_page_sz);

  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  map_lock ();

  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  clib_mem_unpoison (hdr, sys_page_sz);
  hdr->next = 0;
  hdr->prev = mm->last_map;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  map_unlock ();

  clib_mem_unpoison (base, size);
  return base;
}

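/* Unmap a region previously returned by clib_mem_vm_map_internal: locate
 * the header page just below the base address, unmap the region, unlink
 * the header from the list and finally unmap the header itself. Returns 0
 * on success, CLIB_MEM_ERROR otherwise. */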
__clib_export int
clib_mem_vm_unmap (void *base)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;

  map_lock ();
  if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
    goto out;

  size = hdr->num_pages << hdr->log2_page_sz;
  if (munmap ((void *) hdr->base_addr, size) != 0)
    goto out;

  if (hdr->next)
    {
      mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->next->prev = hdr->prev;
      mprotect (hdr->next, sys_page_sz, PROT_NONE);
    }
  else
    mm->last_map = hdr->prev;

  if (hdr->prev)
    {
      mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->prev->next = hdr->next;
      mprotect (hdr->prev, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr->next;

  map_unlock ();

  if (munmap (hdr, sys_page_sz) != 0)
    return CLIB_MEM_ERROR;

  return 0;
out:
  map_unlock ();
  return CLIB_MEM_ERROR;
}

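/* Collect per-NUMA-node residency statistics for n_pages pages starting at
 * 'start', using the move_pages() syscall with a null node array as a pure
 * query. Pages reported as -EFAULT are counted as not mapped. */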
__clib_export void
clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
			 uword n_pages, clib_mem_page_stats_t * stats)
{
  int i, *status = 0;
  void **ptr = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  vec_validate (status, n_pages - 1);
  vec_validate (ptr, n_pages - 1);

  for (i = 0; i < n_pages; i++)
    ptr[i] = start + (i << log2_page_size);

  clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
  stats->total = n_pages;
  stats->log2_page_sz = log2_page_size;

  if (syscall (__NR_move_pages, 0, n_pages, ptr, 0, status, 0) != 0)
    {
      stats->unknown = n_pages;
      goto done;
    }

  for (i = 0; i < n_pages; i++)
    {
      if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
	{
	  stats->mapped++;
	  stats->per_numa[status[i]]++;
	}
      else if (status[i] == -EFAULT)
	stats->not_mapped++;
      else
	stats->unknown++;
    }

done:
  vec_free (status);
  vec_free (ptr);
}

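/* Translate virtual addresses to physical addresses by reading
 * /proc/self/pagemap. Returns a vector of physical addresses, or 0 if any
 * page is not present (bit 63 of its pagemap entry unset) or on I/O error.
 * Reading real frame numbers typically requires root / CAP_SYS_ADMIN. */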
__clib_export u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
		       int n_pages)
{
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      u64 seek, pagemap = 0;
      uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
      seek = ((u64) vaddr / pagesize) * sizeof (u64);
      if (lseek (fd, seek, SEEK_SET) != seek)
	goto done;

      if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
	goto done;

      if ((pagemap & (1ULL << 63)) == 0)
	goto done;

      pagemap &= pow2_mask (55);
      vec_add1 (r, pagemap * pagesize);
    }

done:
  close (fd);
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}

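/* Bind (force != 0) or prefer (force == 0) memory allocations of the
 * calling thread to the given NUMA node via set_mempolicy(). Returns
 * CLIB_MEM_ERROR if NUMA is unsupported and a non-zero node is requested,
 * or if the syscall fails. */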
__clib_export int
clib_mem_set_numa_affinity (u8 numa_node, int force)
{
  clib_mem_main_t *mm = &clib_mem_main;
  long unsigned int mask[16] = { 0 };
  int mask_len = sizeof (mask) * 8 + 1;

  /* no numa support */
  if (mm->numa_node_bitmap == 0)
    {
      if (numa_node)
	{
	  vec_reset_length (mm->error);
	  mm->error = clib_error_return (mm->error, "%s: numa not supported",
					 (char *) __func__);
	  return CLIB_MEM_ERROR;
	}
      else
	return 0;
    }

  mask[0] = 1 << numa_node;

  if (syscall (__NR_set_mempolicy, force ? MPOL_BIND : MPOL_PREFERRED, mask,
	       mask_len))
    goto error;

  vec_reset_length (mm->error);
  return 0;

error:
  vec_reset_length (mm->error);
  mm->error = clib_error_return_unix (mm->error, (char *) __func__);
  return CLIB_MEM_ERROR;
}

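/* Restore the default (MPOL_DEFAULT) memory allocation policy for the
 * calling thread. */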
__clib_export int
clib_mem_set_default_numa_affinity ()
{
  clib_mem_main_t *mm = &clib_mem_main;

  if (syscall (__NR_set_mempolicy, MPOL_DEFAULT, 0, 0))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, (char *) __func__);
      return CLIB_MEM_ERROR;
    }
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */