blob: dbc4c549a9909350c3ef7784582fae3adce9e43e [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * physmem.c: Unix physical memory
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
Damjan Marion49d66f12017-07-20 18:10:35 +020040#include <unistd.h>
41#include <sys/types.h>
42#include <sys/mount.h>
43#include <sys/mman.h>
44#include <sys/fcntl.h>
45#include <sys/stat.h>
Damjan Marion2ca8ced2017-10-30 22:38:47 +010046#include <unistd.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070047
Damjan Marion01914ce2017-09-14 19:04:50 +020048#include <vppinfra/linux/syscall.h>
49#include <vppinfra/linux/sysfs.h>
Damjan Marion49d66f12017-07-20 18:10:35 +020050#include <vlib/vlib.h>
51#include <vlib/physmem.h>
52#include <vlib/unix/unix.h>
Damjan Marion1ba0fa42018-03-04 17:19:08 +010053#include <vlib/pci/pci.h>
54#include <vlib/linux/vfio.h>
Damjan Marion2ca8ced2017-10-30 22:38:47 +010055
Ed Warnickecb9cada2015-12-08 15:45:58 -070056static void *
Damjan Marion49d66f12017-07-20 18:10:35 +020057unix_physmem_alloc_aligned (vlib_main_t * vm, vlib_physmem_region_index_t idx,
58 uword n_bytes, uword alignment)
Ed Warnickecb9cada2015-12-08 15:45:58 -070059{
Damjan Marion49d66f12017-07-20 18:10:35 +020060 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
Ed Warnickecb9cada2015-12-08 15:45:58 -070061 uword lo_offset, hi_offset;
Dave Barach9b8ffd92016-07-08 08:13:45 -040062 uword *to_free = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -070063
Damjan Marion49d66f12017-07-20 18:10:35 +020064 if (pr->heap == 0)
65 return 0;
66
Ed Warnickecb9cada2015-12-08 15:45:58 -070067 /* IO memory is always at least cache aligned. */
68 alignment = clib_max (alignment, CLIB_CACHE_LINE_BYTES);
69
70 while (1)
71 {
Dave Barach6a5adc32018-07-04 10:56:23 -040072#if USE_DLMALLOC == 0
73
Damjan Marion49d66f12017-07-20 18:10:35 +020074 mheap_get_aligned (pr->heap, n_bytes,
Ed Warnickecb9cada2015-12-08 15:45:58 -070075 /* align */ alignment,
76 /* align offset */ 0,
77 &lo_offset);
Dave Barach6a5adc32018-07-04 10:56:23 -040078#else
79 lo_offset = (uword) mspace_get_aligned (pr->heap, n_bytes,
80 alignment, ~0ULL /* offset */ );
81 if (lo_offset == 0)
82 lo_offset = ~0ULL;
83#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -070084
85 /* Allocation failed? */
86 if (lo_offset == ~0)
87 break;
88
89 /* Make sure allocation does not span DMA physical chunk boundary. */
90 hi_offset = lo_offset + n_bytes - 1;
91
Damjan Marion52014c62017-12-13 13:51:13 +010092 if (((pointer_to_uword (pr->heap) + lo_offset) >> pr->log2_page_size) ==
93 ((pointer_to_uword (pr->heap) + hi_offset) >> pr->log2_page_size))
Ed Warnickecb9cada2015-12-08 15:45:58 -070094 break;
95
96 /* Allocation would span chunk boundary, queue it to be freed as soon as
Dave Barach9b8ffd92016-07-08 08:13:45 -040097 we find suitable chunk. */
Ed Warnickecb9cada2015-12-08 15:45:58 -070098 vec_add1 (to_free, lo_offset);
99 }
100
101 if (to_free != 0)
102 {
103 uword i;
104 for (i = 0; i < vec_len (to_free); i++)
Dave Barach6a5adc32018-07-04 10:56:23 -0400105 {
106#if USE_DLMALLOC == 0
107 mheap_put (pr->heap, to_free[i]);
108#else
109 mspace_put_no_offset (pr->heap, (void *) to_free[i]);
110#endif
111 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700112 vec_free (to_free);
113 }
114
Damjan Marion13637632018-07-20 15:33:17 +0200115#if USE_DLMALLOC == 0
116 return lo_offset != ~0 ? (void *) (pr->heap + lo_offset) : 0;
117#else
118 return lo_offset != ~0 ? (void *) lo_offset : 0;
119#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -0700120}
121
Dave Barach9b8ffd92016-07-08 08:13:45 -0400122static void
Damjan Marion49d66f12017-07-20 18:10:35 +0200123unix_physmem_free (vlib_main_t * vm, vlib_physmem_region_index_t idx, void *x)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700124{
Damjan Marion49d66f12017-07-20 18:10:35 +0200125 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700126 /* Return object to region's heap. */
Dave Barach6a5adc32018-07-04 10:56:23 -0400127#if USE_DLMALLOC == 0
Damjan Marion49d66f12017-07-20 18:10:35 +0200128 mheap_put (pr->heap, x - pr->heap);
Dave Barach6a5adc32018-07-04 10:56:23 -0400129#else
130 mspace_put_no_offset (pr->heap, x);
131#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -0700132}
133
Damjan Marion49d66f12017-07-20 18:10:35 +0200134static clib_error_t *
135unix_physmem_region_alloc (vlib_main_t * vm, char *name, u32 size,
136 u8 numa_node, u32 flags,
137 vlib_physmem_region_index_t * idx)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700138{
Damjan Marion88c06212018-03-05 20:08:28 +0100139 vlib_physmem_main_t *vpm = &physmem_main;
Damjan Marion49d66f12017-07-20 18:10:35 +0200140 vlib_physmem_region_t *pr;
141 clib_error_t *error = 0;
Damjan Marion01914ce2017-09-14 19:04:50 +0200142 clib_mem_vm_alloc_t alloc = { 0 };
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100143 int i;
Damjan Marion49d66f12017-07-20 18:10:35 +0200144
145 pool_get (vpm->regions, pr);
146
147 if ((pr - vpm->regions) >= 256)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700148 {
Damjan Marion49d66f12017-07-20 18:10:35 +0200149 error = clib_error_return (0, "maximum number of regions reached");
150 goto error;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700151 }
152
Damjan Marion01914ce2017-09-14 19:04:50 +0200153 alloc.name = name;
154 alloc.size = size;
155 alloc.numa_node = numa_node;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700156
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100157 alloc.flags = (flags & VLIB_PHYSMEM_F_SHARED) ?
158 CLIB_MEM_VM_F_SHARED : CLIB_MEM_VM_F_LOCKED;
159
160 if ((flags & VLIB_PHYSMEM_F_HUGETLB))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 {
Damjan Marion01914ce2017-09-14 19:04:50 +0200162 alloc.flags |= CLIB_MEM_VM_F_HUGETLB;
163 alloc.flags |= CLIB_MEM_VM_F_HUGETLB_PREALLOC;
164 alloc.flags |= CLIB_MEM_VM_F_NUMA_FORCE;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700165 }
Damjan Marion49d66f12017-07-20 18:10:35 +0200166 else
167 {
Damjan Marion01914ce2017-09-14 19:04:50 +0200168 alloc.flags |= CLIB_MEM_VM_F_NUMA_PREFER;
Damjan Marion49d66f12017-07-20 18:10:35 +0200169 }
170
Damjan Marion01914ce2017-09-14 19:04:50 +0200171 error = clib_mem_vm_ext_alloc (&alloc);
172 if (error)
173 goto error;
Damjan Marion49d66f12017-07-20 18:10:35 +0200174
Damjan Marion01914ce2017-09-14 19:04:50 +0200175 pr->index = pr - vpm->regions;
176 pr->flags = flags;
177 pr->fd = alloc.fd;
178 pr->mem = alloc.addr;
179 pr->log2_page_size = alloc.log2_page_size;
180 pr->n_pages = alloc.n_pages;
Chris Lukeb2bcad62017-09-18 08:51:22 -0400181 pr->size = (u64) pr->n_pages << (u64) pr->log2_page_size;
Haiyang Tana5ab5032018-10-15 06:17:55 -0700182 pr->page_mask = (1ull << pr->log2_page_size) - 1;
Damjan Marion49d66f12017-07-20 18:10:35 +0200183 pr->numa_node = numa_node;
lollita liueaba9342018-06-13 05:14:02 -0400184 pr->name = format (0, "%s%c", name, 0);
Damjan Marion49d66f12017-07-20 18:10:35 +0200185
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100186 for (i = 0; i < pr->n_pages; i++)
Damjan Marion49d66f12017-07-20 18:10:35 +0200187 {
Lee Roberts45a09462018-03-07 19:47:00 -0700188 void *ptr = pr->mem + ((u64) i << pr->log2_page_size);
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100189 int node;
190 if ((move_pages (0, 1, &ptr, 0, &node, 0) == 0) && (numa_node != node))
Damjan Marion49d66f12017-07-20 18:10:35 +0200191 {
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100192 clib_warning ("physmem page for region \'%s\' allocated on the"
193 " wrong numa node (requested %u actual %u)",
194 pr->name, pr->numa_node, node, i);
195 break;
Damjan Marion49d66f12017-07-20 18:10:35 +0200196 }
197 }
198
Damjan Marion51c52c02018-03-09 16:05:56 +0100199 pr->page_table = clib_mem_vm_get_paddr (pr->mem, pr->log2_page_size,
200 pr->n_pages);
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100201
202 linux_vfio_dma_map_regions (vm);
203
Damjan Marion49d66f12017-07-20 18:10:35 +0200204 if (flags & VLIB_PHYSMEM_F_INIT_MHEAP)
205 {
Dave Barach6a5adc32018-07-04 10:56:23 -0400206#if USE_DLMALLOC == 0
Damjan Marion49d66f12017-07-20 18:10:35 +0200207 pr->heap = mheap_alloc_with_flags (pr->mem, pr->size,
208 /* Don't want mheap mmap/munmap with IO memory. */
209 MHEAP_FLAG_DISABLE_VM |
210 MHEAP_FLAG_THREAD_SAFE);
Dave Barach6a5adc32018-07-04 10:56:23 -0400211#else
212 pr->heap = create_mspace_with_base (pr->mem, pr->size, 1 /* locked */ );
213 mspace_disable_expand (pr->heap);
214#endif
Damjan Marion49d66f12017-07-20 18:10:35 +0200215 }
216
Damjan Marion49d66f12017-07-20 18:10:35 +0200217 *idx = pr->index;
218
Damjan Marion49d66f12017-07-20 18:10:35 +0200219 goto done;
220
221error:
Damjan Marion49d66f12017-07-20 18:10:35 +0200222 memset (pr, 0, sizeof (*pr));
223 pool_put (vpm->regions, pr);
224
225done:
Damjan Marion49d66f12017-07-20 18:10:35 +0200226 return error;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700227}
228
Damjan Marion49d66f12017-07-20 18:10:35 +0200229static void
230unix_physmem_region_free (vlib_main_t * vm, vlib_physmem_region_index_t idx)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700231{
Damjan Marion88c06212018-03-05 20:08:28 +0100232 vlib_physmem_main_t *vpm = &physmem_main;
Damjan Marion49d66f12017-07-20 18:10:35 +0200233 vlib_physmem_region_t *pr = vlib_physmem_get_region (vm, idx);
234
235 if (pr->fd > 0)
236 close (pr->fd);
237 munmap (pr->mem, pr->size);
238 vec_free (pr->name);
239 pool_put (vpm->regions, pr);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700240}
241
Dave Barach9b8ffd92016-07-08 08:13:45 -0400242clib_error_t *
Damjan Marion49d66f12017-07-20 18:10:35 +0200243unix_physmem_init (vlib_main_t * vm)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700244{
Damjan Marion88c06212018-03-05 20:08:28 +0100245 vlib_physmem_main_t *vpm = &physmem_main;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400246 clib_error_t *error = 0;
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100247 u64 *pt = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700248
249 /* Avoid multiple calls. */
250 if (vm->os_physmem_alloc_aligned)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400251 return error;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700252
Damjan Marion1ba0fa42018-03-04 17:19:08 +0100253 /* check if pagemap is accessible */
254 pt = clib_mem_vm_get_paddr (&pt, min_log2 (sysconf (_SC_PAGESIZE)), 1);
255 if (pt[0])
256 vpm->flags |= VLIB_PHYSMEM_MAIN_F_HAVE_PAGEMAP;
257 vec_free (pt);
258
259 if ((error = linux_vfio_init (vm)))
260 return error;
261
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262 vm->os_physmem_alloc_aligned = unix_physmem_alloc_aligned;
263 vm->os_physmem_free = unix_physmem_free;
Damjan Marion49d66f12017-07-20 18:10:35 +0200264 vm->os_physmem_region_alloc = unix_physmem_region_alloc;
265 vm->os_physmem_region_free = unix_physmem_region_free;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700266
Ed Warnickecb9cada2015-12-08 15:45:58 -0700267 return error;
268}
269
/* CLI handler for "show physmem": print every physmem region (index,
   name, page size in KB, page count, numa node, backing fd) followed by
   its heap statistics, or "no heap" when none was initialized. */
static clib_error_t *
show_physmem (vlib_main_t * vm,
	      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_physmem_main_t *vpm = &physmem_main;
  vlib_physmem_region_t *pr;

  /* *INDENT-OFF* */
  pool_foreach (pr, vpm->regions, (
    {
      vlib_cli_output (vm, "index %u name '%s' page-size %uKB num-pages %d "
		       "numa-node %u fd %d\n",
		       pr->index, pr->name, (1 << (pr->log2_page_size -10)),
		       pr->n_pages, pr->numa_node, pr->fd);
      if (pr->heap)
	vlib_cli_output (vm, " %U", format_mheap, pr->heap, /* verbose */ 1);
      else
	vlib_cli_output (vm, " no heap\n");
    }));
  /* *INDENT-ON* */
  return 0;
}
292
Dave Barach9b8ffd92016-07-08 08:13:45 -0400293/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700294VLIB_CLI_COMMAND (show_physmem_command, static) = {
295 .path = "show physmem",
296 .short_help = "Show physical memory allocation",
297 .function = show_physmem,
298};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400299/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700300
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301/*
302 * fd.io coding-style-patch-verification: ON
303 *
304 * Local Variables:
305 * eval: (c-set-style "gnu")
306 * End:
307 */