/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * physmem.c: Unix physical memory
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <unistd.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <linux/vfio.h>

#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/physmem.h>
#include <vlib/unix/unix.h>

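/* File descriptor of the VFIO container (if any) already opened by this
   process, discovered lazily by scanning /proc/self/fd; -1 until found. */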
static int vfio_container_fd = -1;

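/* Allocate an aligned object from a region's mheap.  Candidate allocations
   that would straddle a physical page boundary (and thus might not be
   physically contiguous for DMA) are set aside and freed back to the heap
   once a suitable chunk is found. */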
static void *
unix_physmem_alloc_aligned (vlib_main_t * vm, vlib_physmem_region_index_t idx,
                            uword n_bytes, uword alignment)
{
  vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
  uword lo_offset, hi_offset;
  uword *to_free = 0;

  if (pr->heap == 0)
    return 0;

  /* IO memory is always at least cache aligned. */
  alignment = clib_max (alignment, CLIB_CACHE_LINE_BYTES);

  while (1)
    {
      mheap_get_aligned (pr->heap, n_bytes,
                         /* align */ alignment,
                         /* align offset */ 0,
                         &lo_offset);

      /* Allocation failed? */
      if (lo_offset == ~0)
        break;

      if (pr->flags & VLIB_PHYSMEM_F_FAKE)
        break;

      /* Make sure allocation does not span DMA physical chunk boundary. */
      hi_offset = lo_offset + n_bytes - 1;

      if (((pointer_to_uword (pr->heap) + lo_offset) >> pr->log2_page_size) ==
          ((pointer_to_uword (pr->heap) + hi_offset) >> pr->log2_page_size))
        break;

      /* Allocation would span chunk boundary, queue it to be freed as soon
         as we find a suitable chunk. */
      vec_add1 (to_free, lo_offset);
    }

  if (to_free != 0)
    {
      uword i;
      for (i = 0; i < vec_len (to_free); i++)
        mheap_put (pr->heap, to_free[i]);
      vec_free (to_free);
    }

  return lo_offset != ~0 ? pr->heap + lo_offset : 0;
}

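/* Free an object previously returned by unix_physmem_alloc_aligned;
   x must point into region idx. */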
static void
unix_physmem_free (vlib_main_t * vm, vlib_physmem_region_index_t idx, void *x)
{
  vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
  /* Return object to region's heap. */
  mheap_put (pr->heap, x - pr->heap);
}

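/* foreach_directory_file callback: if the file descriptor named by
   file_name is a symlink to /dev/vfio/vfio, remember it as the process'
   VFIO container fd. */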
static clib_error_t *
scan_vfio_fd (void *arg, u8 * path_name, u8 * file_name)
{
  const char fn[] = "/dev/vfio/vfio";
  char buff[sizeof (fn)] = { 0 };

  if (readlink ((char *) path_name, buff, sizeof (fn)) + 1 != sizeof (fn))
    return 0;

  if (strncmp (fn, buff, sizeof (fn)))
    return 0;

  vfio_container_fd = atoi ((char *) file_name);
  return 0;
}

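/* If the process holds a VFIO container that supports the type1 IOMMU,
   map every page of the region into the IOMMU so devices can DMA to it;
   the IOVA of each page is its physical address from the page table. */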
static clib_error_t *
unix_physmem_region_iommu_register (vlib_physmem_region_t * pr)
{
  struct vfio_iommu_type1_dma_map dma_map = { 0 };
  int i, fd;

  if (vfio_container_fd == -1)
    foreach_directory_file ("/proc/self/fd", scan_vfio_fd, 0, 0);

  fd = vfio_container_fd;

  if (fd < 0)
    return 0;

  if (ioctl (fd, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
    return 0;

  if (ioctl (fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) == 0)
    return 0;

  dma_map.argsz = sizeof (struct vfio_iommu_type1_dma_map);
  dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

  vec_foreach_index (i, pr->page_table)
  {
    dma_map.vaddr = pointer_to_uword (pr->mem) + (i << pr->log2_page_size);
    dma_map.size = 1 << pr->log2_page_size;
    dma_map.iova = pr->page_table[i];
    if (ioctl (fd, VFIO_IOMMU_MAP_DMA, &dma_map) != 0)
      return clib_error_return_unix (0, "ioctl (VFIO_IOMMU_MAP_DMA)");
  }
  return 0;
}

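/* Allocate a physical memory region.  Real regions are hugepage-backed,
   NUMA-pinned shared mappings registered with the IOMMU; regions created
   with VLIB_PHYSMEM_F_FAKE use ordinary pages and skip the physical
   address / IOMMU bookkeeping.  Optionally an mheap is created on top of
   the region so callers can sub-allocate from it. */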
static clib_error_t *
unix_physmem_region_alloc (vlib_main_t * vm, char *name, u32 size,
                           u8 numa_node, u32 flags,
                           vlib_physmem_region_index_t * idx)
{
  vlib_physmem_main_t *vpm = &vm->physmem_main;
  vlib_physmem_region_t *pr;
  clib_error_t *error = 0;
  clib_mem_vm_alloc_t alloc = { 0 };

  if (geteuid () != 0 && (flags & VLIB_PHYSMEM_F_FAKE) == 0)
    return clib_error_return (0, "not allowed");

  pool_get (vpm->regions, pr);

  if ((pr - vpm->regions) >= 256)
    {
      error = clib_error_return (0, "maximum number of regions reached");
      goto error;
    }

  alloc.name = name;
  alloc.size = size;
  alloc.numa_node = numa_node;
  alloc.flags = CLIB_MEM_VM_F_SHARED;

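  /* Real regions are backed by preallocated huge pages forced onto the
     requested numa node; fake regions use ordinary pages and only prefer
     that node. */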
  if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
    {
      alloc.flags |= CLIB_MEM_VM_F_HUGETLB;
      alloc.flags |= CLIB_MEM_VM_F_HUGETLB_PREALLOC;
      alloc.flags |= CLIB_MEM_VM_F_NUMA_FORCE;
    }
  else
    {
      alloc.flags |= CLIB_MEM_VM_F_NUMA_PREFER;
    }

  error = clib_mem_vm_ext_alloc (&alloc);
  if (error)
    goto error;

  pr->index = pr - vpm->regions;
  pr->flags = flags;
  pr->fd = alloc.fd;
  pr->mem = alloc.addr;
  pr->log2_page_size = alloc.log2_page_size;
  pr->n_pages = alloc.n_pages;
  pr->size = (u64) pr->n_pages << (u64) pr->log2_page_size;
  pr->page_mask = (1 << pr->log2_page_size) - 1;
  pr->numa_node = numa_node;
  pr->name = format (0, "%s", name);

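  /* For real regions: verify that every page actually landed on the
     requested numa node, look up the physical address of each page, and
     register the region with the IOMMU when a VFIO container is present. */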
  if ((flags & VLIB_PHYSMEM_F_FAKE) == 0)
    {
      int i;
      for (i = 0; i < pr->n_pages; i++)
        {
          void *ptr = pr->mem + (i << pr->log2_page_size);
          int node;
          move_pages (0, 1, &ptr, 0, &node, 0);
          if (numa_node != node)
            {
              clib_warning ("physmem page %u for region '%s' allocated on "
                            "the wrong numa node (requested %u actual %u)",
                            i, pr->name, pr->numa_node, node);
              break;
            }
        }
      pr->page_table = clib_mem_vm_get_paddr (pr->mem, pr->log2_page_size,
                                              pr->n_pages);
      error = unix_physmem_region_iommu_register (pr);
      if (error)
        clib_error_report (error);
    }

  if (flags & VLIB_PHYSMEM_F_INIT_MHEAP)
    {
      pr->heap = mheap_alloc_with_flags (pr->mem, pr->size,
                                         /* Don't want mheap mmap/munmap with IO memory. */
                                         MHEAP_FLAG_DISABLE_VM |
                                         MHEAP_FLAG_THREAD_SAFE);
    }

  *idx = pr->index;

  goto done;

error:
  memset (pr, 0, sizeof (*pr));
  pool_put (vpm->regions, pr);

done:
  return error;
}

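/* Unmap a region, close its backing file descriptor (if any) and return
   the region slot to the pool. */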
static void
unix_physmem_region_free (vlib_main_t * vm, vlib_physmem_region_index_t idx)
{
  vlib_physmem_main_t *vpm = &vm->physmem_main;
  vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);

  if (pr->fd > 0)
    close (pr->fd);
  munmap (pr->mem, pr->size);
  vec_free (pr->name);
  pool_put (vpm->regions, pr);
}

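/* Install the unix physmem callbacks into the vlib main structure.  Safe to
   call more than once; only the first call has any effect. */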
clib_error_t *
unix_physmem_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;

  /* Avoid multiple calls. */
  if (vm->os_physmem_alloc_aligned)
    return error;

  vm->os_physmem_alloc_aligned = unix_physmem_alloc_aligned;
  vm->os_physmem_free = unix_physmem_free;
  vm->os_physmem_region_alloc = unix_physmem_region_alloc;
  vm->os_physmem_region_free = unix_physmem_region_free;

  return error;
}

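/* CLI handler for "show physmem": print one line per region (index, name,
   page size, page count, numa node, fd) plus mheap statistics when the
   region carries a heap. */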
static clib_error_t *
show_physmem (vlib_main_t * vm,
              unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_physmem_main_t *vpm = &vm->physmem_main;
  vlib_physmem_region_t *pr;

  /* *INDENT-OFF* */
  pool_foreach (pr, vpm->regions, (
    {
      vlib_cli_output (vm, "index %u name '%s' page-size %uKB num-pages %d "
                       "numa-node %u fd %d\n",
                       pr->index, pr->name, (1 << (pr->log2_page_size - 10)),
                       pr->n_pages, pr->numa_node, pr->fd);
      if (pr->heap)
        vlib_cli_output (vm, "  %U", format_mheap, pr->heap, /* verbose */ 1);
      else
        vlib_cli_output (vm, "  no heap\n");
    }));
  /* *INDENT-ON* */
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_physmem_command, static) = {
  .path = "show physmem",
  .short_help = "Show physical memory allocation",
  .function = show_physmem,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */