/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>

#include <vppinfra/clib.h>
#include <vppinfra/mem.h>
#include <vppinfra/lock.h>
#include <vppinfra/time.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/format.h>
#include <vppinfra/clib_error.h>
#include <vppinfra/linux/sysfs.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL 0x0001   /* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004   /* prevent file from growing */
#define F_SEAL_WRITE 0x0008  /* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif

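/* Spin lock serializing updates to the global list of mapping headers
 * (first_map / last_map) kept in clib_mem_main. */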
static void
map_lock ()
{
  while (clib_atomic_test_and_set (&clib_mem_main.map_lock))
    CLIB_PAUSE ();
}

static void
map_unlock ()
{
  clib_atomic_release (&clib_mem_main.map_lock);
}

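/* Fallback for kernels where memfd_create (MFD_HUGETLB) is unavailable:
 * parse the "Hugepagesize:" line (reported in kB) from /proc/meminfo,
 * hence the result is 10 + min_log2 (size). */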
static clib_mem_page_sz_t
legacy_get_log2_default_hugepage_size (void)
{
  clib_mem_page_sz_t log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
  FILE *fp;
  char tmp[33] = { };

  if ((fp = fopen ("/proc/meminfo", "r")) == NULL)
    return CLIB_MEM_PAGE_SZ_UNKNOWN;

  while (fscanf (fp, "%32s", tmp) > 0)
    if (strncmp ("Hugepagesize:", tmp, 13) == 0)
      {
        u32 size;
        if (fscanf (fp, "%u", &size) > 0)
          log2_page_size = 10 + min_log2 (size);
        break;
      }

  fclose (fp);
  return log2_page_size;
}

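/* One-time initialization of clib_mem_main: record the system page size,
 * discover the default hugepage size (memfd_create probe with a
 * /proc/meminfo fallback) and build a bitmap of usable NUMA nodes by
 * asking move_pages () to place a locked page on each candidate node. */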
void
clib_mem_main_init ()
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword page_size;
  void *va;
  int fd;

  if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    return;

  /* system page size */
  page_size = sysconf (_SC_PAGESIZE);
  mm->log2_page_sz = min_log2 (page_size);

  /* default system hugepage size */
  if ((fd = syscall (__NR_memfd_create, "test", MFD_HUGETLB)) != -1)
    {
      mm->log2_default_hugepage_sz = clib_mem_get_fd_log2_page_size (fd);
      close (fd);
    }
  else /* likely kernel older than 4.14 */
    mm->log2_default_hugepage_sz = legacy_get_log2_default_hugepage_size ();

  mm->log2_sys_default_hugepage_sz = mm->log2_default_hugepage_sz;

  /* numa nodes */
  va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
             MAP_ANONYMOUS, -1, 0);
  if (va == MAP_FAILED)
    return;

  if (mlock (va, page_size))
    goto done;

  for (int i = 0; i < CLIB_MAX_NUMAS; i++)
    {
      int status;
      if (syscall (__NR_move_pages, 0, 1, &va, &i, &status, 0) == 0)
        mm->numa_node_bitmap |= 1ULL << i;
    }

done:
  munmap (va, page_size);
}

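/* Page size of the memory backing a file descriptor; for memfd and
 * hugetlbfs files the kernel reports it in st_blksize. Returns 0 on
 * failure. */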
__clib_export u64
clib_mem_get_fd_page_size (int fd)
{
  struct stat st = { 0 };
  if (fstat (fd, &st) == -1)
    return 0;
  return st.st_blksize;
}

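/* Same as above, expressed as log2; CLIB_MEM_PAGE_SZ_UNKNOWN on failure. */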
__clib_export clib_mem_page_sz_t
clib_mem_get_fd_log2_page_size (int fd)
{
  uword page_size = clib_mem_get_fd_page_size (fd);
  return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
}

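/* Add a small pseudo-random, page-aligned offset (derived from the CPU
 * time stamp counter) to a requested virtual address; larger page sizes
 * get fewer random bits so the resulting shift stays modest. */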
__clib_export void
clib_mem_vm_randomize_va (uword * requested_va,
                          clib_mem_page_sz_t log2_page_size)
{
  u8 bit_mask = 15;

  if (log2_page_size <= 12)
    bit_mask = 15;
  else if (log2_page_size > 12 && log2_page_size <= 16)
    bit_mask = 3;
  else
    bit_mask = 0;

  *requested_va +=
    (clib_cpu_time_now () & bit_mask) * (1ull << log2_page_size);
}

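/* Fallback for kernels (< 4.14) without memfd_create (MFD_HUGETLB):
 * mount a private hugetlbfs instance in a temporary directory, create a
 * file on it, then lazily unmount and remove the directory so only the
 * returned file descriptor keeps the backing memory alive. */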
static int
legacy_memfd_create (u8 * name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd = -1;
  char *mount_dir;
  u8 *temp;
  u8 *filename;

  /*
   * Since mkdtemp will modify template string "/tmp/hugepage_mount.XXXXXX",
   * it must not be a string constant, but should be declared as
   * a character array.
   */
  temp = format (0, "/tmp/hugepage_mount.XXXXXX%c", 0);

  /* create mount directory */
  if ((mount_dir = mkdtemp ((char *) temp)) == 0)
    {
      vec_free (temp);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mkdtemp");
      return CLIB_MEM_ERROR;
    }

  if (mount ("none", mount_dir, "hugetlbfs", 0, NULL))
    {
      vec_free (temp);
      rmdir ((char *) mount_dir);
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "mount");
      return CLIB_MEM_ERROR;
    }

  filename = format (0, "%s/%s%c", mount_dir, name, 0);

  if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "open");
    }

  umount2 ((char *) mount_dir, MNT_DETACH);
  rmdir ((char *) mount_dir);
  vec_free (filename);
  vec_free (temp);

  return fd;
}

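/* Create an anonymous, memory-backed file descriptor with the requested
 * page size using memfd_create (); default-page-size files are sealed
 * against shrinking, and hugepage requests fall back to
 * legacy_memfd_create () on pre-4.14 kernels. */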
__clib_export int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_sys_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    vec_set_len (s, 249);
  vec_add1 (s, 0);

  /* memfd_create introduced in kernel 3.17, we don't support older kernels */
  fd = syscall (__NR_memfd_create, (char *) s, memfd_flags);

  /* kernel versions < 4.14 do not support memfd_create for huge pages */
  if (fd == -1 && errno == EINVAL &&
      log2_page_size == CLIB_MEM_PAGE_SZ_DEFAULT_HUGE)
    {
      fd = legacy_memfd_create (s);
    }
  else if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}

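/* Reserve (PROT_NONE) a virtual address range of the given size, aligned
 * to the requested page size, plus one extra system page in front of it
 * that later holds the clib_mem_vm_map_hdr_t for the mapping. Returns the
 * aligned address of the usable range, or ~0 on failure. */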
uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing an existing mapping */
      if (start & pow2_mask (log2_page_sz))
        return ~0;

      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that the reservation is aligned to page_size we need to
   * request one additional page, as mmap will return an address which is
   * aligned only to the system page size */
  base = mmap (0, size + pagesize, PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of allocation */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of allocation */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}

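/* Walk the linked list of mapping headers; pass 0 to get the first one.
 * Headers live in otherwise PROT_NONE guard pages, so the previous header
 * is re-protected and the next one made readable as the walk advances. */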
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t * hdr)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *next;
  if (hdr == 0)
    {
      hdr = mm->first_map;
      if (hdr)
        mprotect (hdr, sys_page_sz, PROT_READ);
      return hdr;
    }
  next = hdr->next;
  mprotect (hdr, sys_page_sz, PROT_NONE);
  if (next)
    mprotect (next, sys_page_sz, PROT_READ);
  return next;
}

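/* Common worker for the clib_mem_vm_map* variants: reserve a suitably
 * aligned range, remap it with the right flags (anonymous or fd-backed;
 * hugepage mappings are mlock()ed so allocation failures surface here),
 * store a clib_mem_vm_map_hdr_t in the guard page preceding the mapping
 * and link it into the global header list. Returns CLIB_MEM_VM_MAP_FAILED
 * on error. */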
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
                          uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
        is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
        log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
        {
        case CLIB_MEM_PAGE_SZ_UNKNOWN:
          /* will fail later */
          break;
        case CLIB_MEM_PAGE_SZ_DEFAULT:
          log2_page_sz = mm->log2_page_sz;
          break;
        case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
          mmap_flags |= MAP_HUGETLB;
          log2_page_sz = mm->log2_default_hugepage_sz;
          is_huge = 1;
          break;
        default:
          mmap_flags |= MAP_HUGETLB;
          mmap_flags |= log2_page_sz << MAP_HUGE_SHIFT;
          is_huge = 1;
        }
    }

  if (log2_page_sz == CLIB_MEM_PAGE_SZ_UNKNOWN)
    return CLIB_MEM_VM_MAP_FAILED;

  size = round_pow2 (size, 1ULL << log2_page_sz);

  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
              MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  map_lock ();

  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  clib_mem_unpoison (hdr, sys_page_sz);
  hdr->next = 0;
  hdr->prev = mm->last_map;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  mprotect (hdr, sys_page_sz, PROT_NONE);

  map_unlock ();

  clib_mem_unpoison (base, size);
  return base;
}

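/* Undo a mapping created by clib_mem_vm_map_internal (): unmap the data
 * pages, unlink the header from the global list and finally unmap the
 * header page itself. */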
__clib_export int
clib_mem_vm_unmap (void *base)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
  clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;

  map_lock ();
  if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
    goto out;

  size = hdr->num_pages << hdr->log2_page_sz;
  if (munmap ((void *) hdr->base_addr, size) != 0)
    goto out;

  if (hdr->next)
    {
      mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->next->prev = hdr->prev;
      mprotect (hdr->next, sys_page_sz, PROT_NONE);
    }
  else
    mm->last_map = hdr->prev;

  if (hdr->prev)
    {
      mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
      hdr->prev->next = hdr->next;
      mprotect (hdr->prev, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr->next;

  map_unlock ();

  if (munmap (hdr, sys_page_sz) != 0)
    return CLIB_MEM_ERROR;

  return 0;
out:
  map_unlock ();
  return CLIB_MEM_ERROR;
}

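/* Query per-NUMA-node placement of n_pages pages starting at 'start'
 * using move_pages () with a NULL nodes argument, which only reports the
 * current node of each page without moving anything. */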
__clib_export void
clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
                         uword n_pages, clib_mem_page_stats_t * stats)
{
  int i, *status = 0;
  void **ptr = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  vec_validate (status, n_pages - 1);
  vec_validate (ptr, n_pages - 1);

  for (i = 0; i < n_pages; i++)
    ptr[i] = start + (i << log2_page_size);

  clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
  stats->total = n_pages;
  stats->log2_page_sz = log2_page_size;

  if (syscall (__NR_move_pages, 0, n_pages, ptr, 0, status, 0) != 0)
    {
      stats->unknown = n_pages;
      goto done;
    }

  for (i = 0; i < n_pages; i++)
    {
      if (status[i] >= 0 && status[i] < CLIB_MAX_NUMAS)
        {
          stats->mapped++;
          stats->per_numa[status[i]]++;
        }
      else if (status[i] == -EFAULT)
        stats->not_mapped++;
      else
        stats->unknown++;
    }

done:
  vec_free (status);
  vec_free (ptr);
}

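/* Translate virtual addresses to physical addresses by reading
 * /proc/self/pagemap; bit 63 of each entry flags a present page and bits
 * 0-54 hold the physical frame number. Returns a vector of physical
 * addresses, or 0 if any page cannot be resolved. */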
__clib_export u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
                       int n_pages)
{
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      u64 seek, pagemap = 0;
      uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
      seek = ((u64) vaddr / pagesize) * sizeof (u64);
      if (lseek (fd, seek, SEEK_SET) != seek)
        goto done;

      if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
        goto done;

      if ((pagemap & (1ULL << 63)) == 0)
        goto done;

      pagemap &= pow2_mask (55);
      vec_add1 (r, pagemap * pagesize);
    }

done:
  close (fd);
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}

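/* Bind (or, without 'force', prefer) allocations of the calling thread to
 * the given NUMA node via set_mempolicy (); a request for node 0 is a
 * no-op on systems without NUMA support. */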
__clib_export int
clib_mem_set_numa_affinity (u8 numa_node, int force)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_bitmap_t *bmp = 0;
  int rv;

  /* no numa support */
  if (mm->numa_node_bitmap == 0)
    {
      if (numa_node)
        {
          vec_reset_length (mm->error);
          mm->error = clib_error_return (mm->error, "%s: numa not supported",
                                         (char *) __func__);
          return CLIB_MEM_ERROR;
        }
      else
        return 0;
    }

  bmp = clib_bitmap_set (bmp, numa_node, 1);

  rv = syscall (__NR_set_mempolicy, force ? MPOL_BIND : MPOL_PREFERRED, bmp,
                vec_len (bmp) * sizeof (bmp[0]) * 8 + 1);

  clib_bitmap_free (bmp);
  vec_reset_length (mm->error);

  if (rv)
    {
      mm->error = clib_error_return_unix (mm->error, (char *) __func__);
      return CLIB_MEM_ERROR;
    }

  return 0;
}

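/* Restore the default (no preference) NUMA memory policy for the calling
 * thread. */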
__clib_export int
clib_mem_set_default_numa_affinity ()
{
  clib_mem_main_t *mm = &clib_mem_main;

  if (syscall (__NR_set_mempolicy, MPOL_DEFAULT, 0, 0))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, (char *) __func__);
      return CLIB_MEM_ERROR;
    }
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */