/* SPDX-License-Identifier: Apache-2.0
2 * Copyright(c) 2021 Cisco Systems, Inc.
3 * Copyright(c) 2024 Tom Jones <thj@freebsd.org>
4 */
5
6#define _GNU_SOURCE
7#include <stdlib.h>
8#include <sys/types.h>
9#include <sys/stat.h>
10#include <unistd.h>
11#include <sys/memrange.h>
12#include <sys/mount.h>
13#include <sys/mman.h>
14#include <fcntl.h>
15#include <vppinfra/clib.h>
16#include <vppinfra/mem.h>
17#include <vppinfra/lock.h>
18#include <vppinfra/time.h>
19#include <vppinfra/bitmap.h>
20#include <vppinfra/format.h>
21#include <vppinfra/clib_error.h>
22
/* fcntl(2) sealing and memfd constants; provide fallbacks when the
 * installed headers predate them. */
#ifndef F_FBSD_SPECIFIC_BASE
#define F_FBSD_SPECIFIC_BASE 1024
#endif

#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_FBSD_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_FBSD_SPECIFIC_BASE + 10)

#define F_SEAL_SEAL   0x0001 /* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW   0x0004 /* prevent file from growing */
#define F_SEAL_WRITE  0x0008 /* prevent writes */
#endif

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif

/* Linux-compatible encoding of an explicit huge page size. */
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef MFD_HUGE_SHIFT
#define MFD_HUGE_SHIFT 26
#endif

/* FreeBSD spells "fail instead of clobbering an existing mapping" as
 * MAP_FIXED | MAP_EXCL. Falling back to plain MAP_FIXED would silently
 * replace whatever is already mapped there, defeating the collision
 * check in clib_mem_vm_reserve(). */
#ifndef MAP_FIXED_NOREPLACE
#ifdef MAP_EXCL
#define MAP_FIXED_NOREPLACE (MAP_FIXED | MAP_EXCL)
#else
#define MAP_FIXED_NOREPLACE MAP_FIXED
#endif
#endif
52
53static void
54map_lock ()
55{
56 while (clib_atomic_test_and_set (&clib_mem_main.map_lock))
57 CLIB_PAUSE ();
58}
59
60static void
61map_unlock ()
62{
63 clib_atomic_release (&clib_mem_main.map_lock);
64}
65
66void
67clib_mem_main_init (void)
68{
69 clib_mem_main_t *mm = &clib_mem_main;
70 long sysconf_page_size;
71 uword page_size;
72 void *va;
73
74 if (mm->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
75 return;
76
77 /* system page size */
78 sysconf_page_size = sysconf (_SC_PAGESIZE);
79 if (sysconf_page_size < 0)
80 {
81 clib_panic ("Could not determine the page size");
82 }
83 page_size = sysconf_page_size;
84 mm->log2_page_sz = min_log2 (page_size);
85
86 mm->log2_default_hugepage_sz = min_log2 (page_size);
87 mm->log2_sys_default_hugepage_sz = mm->log2_default_hugepage_sz;
88
89 /* numa nodes */
90 va = mmap (0, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
91 -1, 0);
92 if (va == MAP_FAILED)
93 return;
94
95 if (mlock (va, page_size))
96 goto done;
97
98 /*
99 * TODO: In linux/mem.c we can move pages to numa domains, this isn't an
100 * option in FreeBSD yet.
101 */
102
103done:
104 munmap (va, page_size);
105}
106
107__clib_export u64
108clib_mem_get_fd_page_size (int fd)
109{
110 struct stat st = { 0 };
111 if (fstat (fd, &st) == -1)
112 return 0;
113 return st.st_blksize;
114}
115
116__clib_export clib_mem_page_sz_t
117clib_mem_get_fd_log2_page_size (int fd)
118{
119 uword page_size = clib_mem_get_fd_page_size (fd);
120 return page_size ? min_log2 (page_size) : CLIB_MEM_PAGE_SZ_UNKNOWN;
121}
122
/* Randomize a requested virtual address for ASLR-style placement.
 * TODO: Not yet implemented on FreeBSD — *requested_va is left untouched. */
__clib_export void
clib_mem_vm_randomize_va (uword *requested_va,
			  clib_mem_page_sz_t log2_page_size)
{
  /* TODO: Not yet implemented */
}
129
/* Create an anonymous memory fd via memfd_create(2) suitable for shared
 * mappings.
 *
 * log2_page_size selects the page size: the system default enables fd
 * sealing; huge sizes set MFD_HUGETLB, with an explicit size encoded via
 * MFD_HUGE_SHIFT for non-default sizes. fmt/... printf-style format the
 * fd name. Returns the new fd, or CLIB_MEM_ERROR with the cause recorded
 * in clib_mem_main.error. */
__clib_export int
clib_mem_vm_create_fd (clib_mem_page_sz_t log2_page_size, char *fmt, ...)
{
  clib_mem_main_t *mm = &clib_mem_main;
  int fd;
  unsigned int memfd_flags;
  va_list va;
  u8 *s = 0;

  /* normalize concrete page sizes to the symbolic default values */
  if (log2_page_size == mm->log2_page_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT;
  else if (log2_page_size == mm->log2_sys_default_hugepage_sz)
    log2_page_size = CLIB_MEM_PAGE_SZ_DEFAULT_HUGE;

  switch (log2_page_size)
    {
    case CLIB_MEM_PAGE_SZ_UNKNOWN:
      return CLIB_MEM_ERROR;
    case CLIB_MEM_PAGE_SZ_DEFAULT:
      memfd_flags = MFD_ALLOW_SEALING;
      break;
    case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
      memfd_flags = MFD_HUGETLB;
      break;
    default:
      /* explicit huge page size, encoded Linux-style in the high bits */
      memfd_flags = MFD_HUGETLB | log2_page_size << MFD_HUGE_SHIFT;
    }

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  /* memfd_create maximum string size is 249 chars without trailing zero */
  if (vec_len (s) > 249)
    vec_set_len (s, 249);
  vec_add1 (s, 0);

  fd = memfd_create ((char *) s, memfd_flags);
  if (fd == -1)
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "memfd_create");
      vec_free (s);
      return CLIB_MEM_ERROR;
    }

  vec_free (s);

  /* seal against shrinking so mappings cannot have pages pulled out from
   * under them; sealing is only requested for default-page fds */
  if ((memfd_flags & MFD_ALLOW_SEALING) &&
      ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1))
    {
      vec_reset_length (mm->error);
      mm->error = clib_error_return_unix (mm->error, "fcntl (F_ADD_SEALS)");
      close (fd);
      return CLIB_MEM_ERROR;
    }

  return fd;
}
189
/* Reserve size bytes of PROT_NONE address space aligned to
 * 1 << log2_page_sz. If start is non-zero the reservation must land
 * exactly there; otherwise the kernel picks a spot and the surplus is
 * trimmed to achieve the alignment. One extra system page is reserved
 * immediately below the returned address (used later for the map
 * header). Returns the usable base address, or ~0 on failure. */
uword
clib_mem_vm_reserve (uword start, uword size, clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_main_t *mm = &clib_mem_main;
  uword pagesize = 1ULL << log2_page_sz;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  uword n_bytes;
  void *base = 0, *p;

  size = round_pow2 (size, pagesize);

  /* in addition to the requested reservation, we also reserve one system
   * page (typically 4K) adjacent to the start of the reservation */

  if (start)
    {
      /* start address is provided, so we just need to make sure we are not
       * replacing an existing map */
      if (start & pow2_mask (log2_page_sz))
	return ~0;
      base = (void *) start - sys_page_sz;
      base = mmap (base, size + sys_page_sz, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

      return (base == MAP_FAILED) ? ~0 : start;
    }

  /* to make sure that we get reservation aligned to page_size we need to
   * request one additional page as mmap will return us address which is
   * aligned only to system page size */
  base =
    mmap (0, size + pagesize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED)
    return ~0;

  /* return additional space at the end of allocation (everything past the
   * last log2_page_sz boundary) */
  p = base + size + pagesize;
  n_bytes = (uword) p & pow2_mask (log2_page_sz);
  if (n_bytes)
    {
      p -= n_bytes;
      munmap (p, n_bytes);
    }

  /* return additional space at the start of allocation, keeping exactly
   * one system page below the aligned base for the header */
  n_bytes = pagesize - sys_page_sz - n_bytes;
  if (n_bytes)
    {
      munmap (base, n_bytes);
      base += n_bytes;
    }

  return (uword) base + sys_page_sz;
}
245
/* Iterate over the global map-header list (hdr == 0 would yield the
 * first entry).
 * TODO: Not yet implemented on FreeBSD — always returns NULL. */
__clib_export clib_mem_vm_map_hdr_t *
clib_mem_vm_get_next_map_hdr (clib_mem_vm_map_hdr_t *hdr)
{
  /* TODO: Not yet implemented */
  return NULL;
}
252
/* Map size bytes at (or near) base, backed by fd when fd != -1, anonymous
 * memory otherwise. A system page directly below the mapping holds a
 * clib_mem_vm_map_hdr_t that is linked into clib_mem_main's map list;
 * headers are kept PROT_NONE except while being edited. Returns the
 * mapped address or CLIB_MEM_VM_MAP_FAILED. */
void *
clib_mem_vm_map_internal (void *base, clib_mem_page_sz_t log2_page_sz,
			  uword size, int fd, uword offset, char *name)
{
  clib_mem_main_t *mm = &clib_mem_main;
  clib_mem_vm_map_hdr_t *hdr;
  uword sys_page_sz = 1ULL << mm->log2_page_sz;
  int mmap_flags = MAP_FIXED, is_huge = 0;

  if (fd != -1)
    {
      /* page size is dictated by the backing fd, not the caller */
      mmap_flags |= MAP_SHARED;
      log2_page_sz = clib_mem_get_fd_log2_page_size (fd);
      if (log2_page_sz > mm->log2_page_sz)
	is_huge = 1;
    }
  else
    {
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

      if (log2_page_sz == mm->log2_page_sz)
	log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;

      switch (log2_page_sz)
	{
	case CLIB_MEM_PAGE_SZ_UNKNOWN:
	  /* will fail later */
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	case CLIB_MEM_PAGE_SZ_DEFAULT_HUGE:
	  /* We shouldn't be selecting HUGETLB on FreeBSD */
	  log2_page_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;
	  break;
	default:
	  log2_page_sz = mm->log2_page_sz;
	  break;
	}
    }

  size = round_pow2 (size, 1ULL << log2_page_sz);

  /* reserve aligned address space plus a header page below it */
  base = (void *) clib_mem_vm_reserve ((uword) base, size, log2_page_sz);

  if (base == (void *) ~0)
    return CLIB_MEM_VM_MAP_FAILED;

  base = mmap (base, size, PROT_READ | PROT_WRITE, mmap_flags, fd, offset);

  if (base == MAP_FAILED)
    return CLIB_MEM_VM_MAP_FAILED;

  /* wire huge-page mappings so the large pages stay resident */
  if (is_huge && (mlock (base, size) != 0))
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  /* materialize the header page that clib_mem_vm_reserve() left below
   * the mapping; MAP_FIXED must land exactly there */
  hdr = mmap (base - sys_page_sz, sys_page_sz, PROT_READ | PROT_WRITE,
	      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

  if (hdr != base - sys_page_sz)
    {
      munmap (base, size);
      return CLIB_MEM_VM_MAP_FAILED;
    }

  map_lock ();

  /* append the new header to the tail of the map list; the previous tail
   * must be made writable just long enough to patch its next pointer */
  if (mm->last_map)
    {
      mprotect (mm->last_map, sys_page_sz, PROT_READ | PROT_WRITE);
      mm->last_map->next = hdr;
      mprotect (mm->last_map, sys_page_sz, PROT_NONE);
    }
  else
    mm->first_map = hdr;

  clib_mem_unpoison (hdr, sys_page_sz);
  hdr->next = 0;
  hdr->prev = mm->last_map;
  snprintf (hdr->name, CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1, "%s", (char *) name);
  mm->last_map = hdr;

  hdr->base_addr = (uword) base;
  hdr->log2_page_sz = log2_page_sz;
  hdr->num_pages = size >> log2_page_sz;
  hdr->fd = fd;
  hdr->name[CLIB_VM_MAP_HDR_NAME_MAX_LEN - 1] = 0;
  /* re-seal the header against stray writes */
  mprotect (hdr, sys_page_sz, PROT_NONE);

  map_unlock ();

  clib_mem_unpoison (base, size);
  return base;
}
350
351__clib_export int
352clib_mem_vm_unmap (void *base)
353{
354 clib_mem_main_t *mm = &clib_mem_main;
355 uword size, sys_page_sz = 1ULL << mm->log2_page_sz;
356 clib_mem_vm_map_hdr_t *hdr = base - sys_page_sz;
357 ;
358
359 map_lock ();
360 if (mprotect (hdr, sys_page_sz, PROT_READ | PROT_WRITE) != 0)
361 goto out;
362
363 size = hdr->num_pages << hdr->log2_page_sz;
364 if (munmap ((void *) hdr->base_addr, size) != 0)
365 goto out;
366
367 if (hdr->next)
368 {
369 mprotect (hdr->next, sys_page_sz, PROT_READ | PROT_WRITE);
370 hdr->next->prev = hdr->prev;
371 mprotect (hdr->next, sys_page_sz, PROT_NONE);
372 }
373 else
374 mm->last_map = hdr->prev;
375
376 if (hdr->prev)
377 {
378 mprotect (hdr->prev, sys_page_sz, PROT_READ | PROT_WRITE);
379 hdr->prev->next = hdr->next;
380 mprotect (hdr->prev, sys_page_sz, PROT_NONE);
381 }
382 else
383 mm->first_map = hdr->next;
384
385 map_unlock ();
386
387 if (munmap (hdr, sys_page_sz) != 0)
388 return CLIB_MEM_ERROR;
389
390 return 0;
391out:
392 map_unlock ();
393 return CLIB_MEM_ERROR;
394}
395
396__clib_export void
397clib_mem_get_page_stats (void *start, clib_mem_page_sz_t log2_page_size,
398 uword n_pages, clib_mem_page_stats_t *stats)
399{
400 int i, *status = 0;
401 void **ptr = 0;
402
403 log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);
404
405 vec_validate (status, n_pages - 1);
406 vec_validate (ptr, n_pages - 1);
407
408 for (i = 0; i < n_pages; i++)
409 ptr[i] = start + (i << log2_page_size);
410
411 clib_memset (stats, 0, sizeof (clib_mem_page_stats_t));
412 stats->total = n_pages;
413 stats->log2_page_sz = log2_page_size;
414
415 /*
416 * TODO: Until FreeBSD has support for tracking pages in NUMA domains just
417 * return that all are unknown for the statsistics.
418 */
419 stats->unknown = n_pages;
420
421 vec_free (status);
422 vec_free (ptr);
423}
424
/* Translate n_pages virtual pages starting at mem into physical addresses
 * using the MEM_EXTRACT_PADDR ioctl on /dev/mem. Returns a vector of
 * physical addresses (caller frees with vec_free) or 0 if /dev/mem cannot
 * be opened or any page fails to resolve. */
__clib_export u64 *
clib_mem_vm_get_paddr (void *mem, clib_mem_page_sz_t log2_page_size,
		       int n_pages)
{
  struct mem_extract meme;
  int pagesize = sysconf (_SC_PAGESIZE);
  int fd;
  int i;
  u64 *r = 0;

  log2_page_size = clib_mem_log2_page_size_validate (log2_page_size);

  if ((fd = open ((char *) "/dev/mem", O_RDONLY)) == -1)
    return 0;

  for (i = 0; i < n_pages; i++)
    {
      meme.me_vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);

      if (ioctl (fd, MEM_EXTRACT_PADDR, &meme) == -1)
	goto done;
      /* NOTE(review): me_paddr is multiplied by the system page size here,
       * i.e. treated as a page frame number — confirm against the mem(4)
       * MEM_EXTRACT_PADDR contract. */
      vec_add1 (r, meme.me_paddr * pagesize);
    }

done:
  close (fd);
  /* a partial translation is treated as total failure */
  if (vec_len (r) != n_pages)
    {
      vec_free (r);
      return 0;
    }
  return r;
}
458
/* Pin future memory allocations of the calling thread to numa_node.
 * TODO: Not yet implemented on FreeBSD — always reports failure. */
__clib_export int
clib_mem_set_numa_affinity (u8 numa_node, int force)
{
  /* TODO: Not yet implemented */
  return CLIB_MEM_ERROR;
}
465
466__clib_export int
467clib_mem_set_default_numa_affinity ()
468{
469 /* TODO: Not yet implemented */
470 return 0;
471}