/*
 *------------------------------------------------------------------
 * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
 * library
 *
 * Copyright (c) 2009 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <string.h>
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/hash.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>

#include "svm.h"

static svm_region_t *root_rp;
static int root_rp_refcount;

#define MAXLOCK 2
static pthread_mutex_t *mutexes_held[MAXLOCK];
static int nheld;

svm_region_t *
svm_get_root_rp (void)
{
  return root_rp;
}

#define MUTEX_DEBUG

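/*
 * svm_get_global_region_base_va
 *
 * Pick the base virtual address for the global region. On AArch64 the
 * usable VA width varies (36-48 bits), so it is derived from the highest
 * address seen in /proc/self/maps; on other architectures a fixed
 * default of 0x130000000ULL is returned.
 */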
u64
svm_get_global_region_base_va ()
{
#if __aarch64__
  /* On AArch64 VA space can have different size, from 36 to 48 bits.
     Here we are trying to detect VA bits by parsing /proc/self/maps
     address ranges */
  int fd;
  unformat_input_t input;
  u64 start, end = 0;
  u8 bits = 0;

  if ((fd = open ("/proc/self/maps", 0)) < 0)
    clib_unix_error ("open '/proc/self/maps'");

  unformat_init_clib_file (&input, fd);
  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "%llx-%llx", &start, &end))
        end--;
      unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);

  bits = count_leading_zeros (end);
  bits = 64 - bits;
  if (bits >= 36 && bits <= 48)
    return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
  else
    clib_unix_error ("unexpected va bits '%u'", bits);
#endif

  /* default value */
  return 0x130000000ULL;
}

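/*
 * region_lock / region_unlock
 *
 * Take and release a region's process-shared mutex. Every mutex taken
 * is also recorded in mutexes_held[] so svm_mutex_cleanup() can drop
 * them all if the process exits at an inopportune moment.
 */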
static void
region_lock (svm_region_t * rp, int tag)
{
  pthread_mutex_lock (&rp->mutex);
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = getpid ();
  rp->mutex_owner_tag = tag;
#endif
  ASSERT (nheld < MAXLOCK);
  /*
   * Keep score of held mutexes so we can try to exit
   * cleanly if the world comes to an end at the worst possible
   * moment
   */
  mutexes_held[nheld++] = &rp->mutex;
}

static void
region_unlock (svm_region_t * rp)
{
  int i, j;
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = 0;
  rp->mutex_owner_tag = 0;
#endif

  for (i = nheld - 1; i >= 0; i--)
    {
      if (mutexes_held[i] == &rp->mutex)
        {
          for (j = i; j < MAXLOCK - 1; j++)
            mutexes_held[j] = mutexes_held[j + 1];
          nheld--;
          goto found;
        }
    }
  ASSERT (0);

found:
  CLIB_MEMORY_BARRIER ();
  pthread_mutex_unlock (&rp->mutex);
}


static u8 *
format_svm_flags (u8 * s, va_list * args)
{
  uword f = va_arg (*args, uword);

  if (f & SVM_FLAGS_MHEAP)
    s = format (s, "MHEAP ");
  if (f & SVM_FLAGS_FILE)
    s = format (s, "FILE ");
  if (f & SVM_FLAGS_NODATA)
    s = format (s, "NODATA ");
  if (f & SVM_FLAGS_NEED_DATA_INIT)
    s = format (s, "INIT ");

  return (s);
}

static u8 *
format_svm_size (u8 * s, va_list * args)
{
  uword size = va_arg (*args, uword);

  if (size >= (1 << 20))
    {
      s = format (s, "(%d mb)", size >> 20);
    }
  else if (size >= (1 << 10))
    {
      s = format (s, "(%d kb)", size >> 10);
    }
  else
    {
      s = format (s, "(%d bytes)", size);
    }
  return (s);
}

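/*
 * format_svm_region
 *
 * Print a region's name, base address, size and client pids; with the
 * verbose flag set, also dump the flags, heap pointers, the in-use VM
 * ranges from the allocation bitmap and (mheap builds) heap statistics.
 */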
u8 *
format_svm_region (u8 * s, va_list * args)
{
  svm_region_t *rp = va_arg (*args, svm_region_t *);
  int verbose = va_arg (*args, int);
  int i;
  uword lo, hi;

  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
              rp->region_name, rp->virtual_base,
              rp->virtual_size, format_svm_size, rp->virtual_size);
  s = format (s, "  user_ctx 0x%x, bitmap_size %d\n",
              rp->user_ctx, rp->bitmap_size);

  if (verbose)
    {
      s = format (s, "  flags: 0x%x %U\n", rp->flags,
                  format_svm_flags, rp->flags);
      s = format (s,
                  "  region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
                  rp->region_heap, rp->data_base, rp->data_heap);
    }

  s = format (s, "  %d clients, pids: ", vec_len (rp->client_pids));

  for (i = 0; i < vec_len (rp->client_pids); i++)
    s = format (s, "%d ", rp->client_pids[i]);

  s = format (s, "\n");

  if (verbose)
    {
      lo = hi = ~0;

      s = format (s, "  VM in use: ");

      for (i = 0; i < rp->bitmap_size; i++)
        {
          if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
            {
              if (lo == ~0)
                {
                  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
                }
              else
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE;
                }
            }
          else
            {
              if (lo != ~0)
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
                  s = format (s, "   0x%x - 0x%x (%dk)\n", lo, hi,
                              (hi - lo) >> 10);
                  lo = hi = ~0;
                }
            }
        }
#if USE_DLMALLOC == 0
      s = format (s, "  rgn heap stats: %U", format_mheap,
                  rp->region_heap, 0);
      if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
        {
          s = format (s, "\n  data heap stats: %U", format_mheap,
                      rp->data_heap, 1);
        }
      s = format (s, "\n");
#endif
    }

  return (s);
}

/*
 * rnd_pagesize
 * Round to a pagesize multiple, presumably 4k works
 */
static u64
rnd_pagesize (u64 size)
{
  u64 rv;

  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
  return (rv);
}

/*
 * svm_data_region_create
 *
 * Set up the data portion of a freshly created region: size the backing
 * file if one is configured (SVM_FLAGS_FILE), mmap it at rp->data_base,
 * and optionally build a thread-safe heap on top (SVM_FLAGS_MHEAP).
 */
static int
svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;

  map_size = rp->virtual_size - (MMAP_PAGESIZE +
                                 (a->pvt_heap_size ? a->pvt_heap_size :
                                  SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      struct stat statb;

      fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size -
              (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
      rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
      rp->flags |= SVM_FLAGS_FILE;
    }

  if (a->flags & SVM_FLAGS_MHEAP)
    {
#if USE_DLMALLOC == 0
      mheap_t *heap_header;
      rp->data_heap =
        mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
                                MHEAP_FLAG_DISABLE_VM);
      heap_header = mheap_header (rp->data_heap);
      heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
#else
      rp->data_heap = create_mspace_with_base (rp->data_base,
                                               map_size, 1 /* locked */ );
      mspace_disable_expand (rp->data_heap);
#endif

      rp->flags |= SVM_FLAGS_MHEAP;
    }
  return 0;
}

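/*
 * svm_data_region_map
 *
 * Attach-side counterpart of svm_data_region_create: size the backing
 * file if needed and mmap it at the data base address recorded in the
 * already-initialized region header.
 */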
static int
svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;
  struct stat statb;

  map_size = rp->virtual_size -
    (MMAP_PAGESIZE
     + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {

      fd = open (a->backing_file, O_RDWR, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size
              - (MMAP_PAGESIZE
                 +
                 (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
    }
  return 0;
}

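/*
 * shm_name_from_svm_map_region_args
 *
 * Build the shm_open() name for a region: "/<root_path>-<name>" when a
 * (chroot) root path is configured, otherwise just the region name.
 * Tolerates present or absent leading slashes in both components.
 */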
u8 *
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
{
  u8 *shm_name;
  int root_path_offset = 0;
  int name_offset = 0;

  if (a->root_path)
    {
      /* Tolerate present or absent slashes */
      if (a->root_path[0] == '/')
        root_path_offset++;

      if (a->name[0] == '/')
        name_offset = 1;

      shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
                         &a->name[name_offset], 0);
    }
  else
    shm_name = format (0, "%s%c", a->name, 0);
  return (shm_name);
}

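/*
 * svm_region_init_mapped_region
 *
 * Initialize a freshly mmap'd region header: process-shared mutex and
 * condvar, private region heap, page allocation bitmap, and the data
 * portion via svm_data_region_create(). Called by the process that wins
 * the race to create the segment.
 */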
void
svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
{
  pthread_mutexattr_t attr;
  pthread_condattr_t cattr;
  int nbits, words, bit;
  int overhead_space;
  void *oldheap;
  uword data_base;
  ASSERT (rp);
  int rv;

  clib_memset (rp, 0, sizeof (*rp));

  if (pthread_mutexattr_init (&attr))
    clib_unix_warning ("mutexattr_init");

  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("mutexattr_setpshared");

  if (pthread_mutex_init (&rp->mutex, &attr))
    clib_unix_warning ("mutex_init");

  if (pthread_mutexattr_destroy (&attr))
    clib_unix_warning ("mutexattr_destroy");

  if (pthread_condattr_init (&cattr))
    clib_unix_warning ("condattr_init");

  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("condattr_setpshared");

  if (pthread_cond_init (&rp->condvar, &cattr))
    clib_unix_warning ("cond_init");

  if (pthread_condattr_destroy (&cattr))
    clib_unix_warning ("condattr_destroy");

  region_lock (rp, 1);

  rp->virtual_base = a->baseva;
  rp->virtual_size = a->size;

#if USE_DLMALLOC == 0
  rp->region_heap =
    mheap_alloc_with_flags (uword_to_pointer
                            (a->baseva + MMAP_PAGESIZE, void *),
                            (a->pvt_heap_size !=
                             0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
                            MHEAP_FLAG_DISABLE_VM);
#else
  rp->region_heap = create_mspace_with_base
    (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
     (a->pvt_heap_size !=
      0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ );

  mspace_disable_expand (rp->region_heap);
#endif

  oldheap = svm_push_pvt_heap (rp);

  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
  vec_add1 (rp->client_pids, getpid ());

  nbits = rp->virtual_size / MMAP_PAGESIZE;

  ASSERT (nbits > 0);
  rp->bitmap_size = nbits;
  words = (nbits + BITS (uword) - 1) / BITS (uword);
  vec_validate (rp->bitmap, words - 1);

  overhead_space = MMAP_PAGESIZE /* header */ +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);

  bit = 0;
  data_base = (uword) rp->virtual_base;

  if (a->flags & SVM_FLAGS_NODATA)
    rp->flags |= SVM_FLAGS_NEED_DATA_INIT;

  do
    {
      clib_bitmap_set_no_check (rp->bitmap, bit, 1);
      bit++;
      overhead_space -= MMAP_PAGESIZE;
      data_base += MMAP_PAGESIZE;
    }
  while (overhead_space > 0);

  rp->data_base = (void *) data_base;

  /*
   * Note: although the POSIX spec guarantees that only one
   * process enters this block, we have to play games
   * to hold off clients until e.g. the mutex is ready
   */
  rp->version = SVM_VERSION;

  /* setup the data portion of the region */

  rv = svm_data_region_create (a, rp);
  if (rv)
    {
      clib_warning ("data_region_create: %d", rv);
    }

  region_unlock (rp);

  svm_pop_heap (oldheap);
}

/*
 * svm_map_region
 *
 * Create the named shared-memory segment if it does not exist yet and
 * initialize it; otherwise attach to the existing segment, waiting for
 * the creator to finish initialization and recovering the region mutex
 * if its holder has died.
 */
void *
svm_map_region (svm_map_region_args_t * a)
{
  int svm_fd;
  svm_region_t *rp;
  int deadman = 0;
  u8 junk = 0;
  void *oldheap;
  int rv;
  int pid_holding_region_lock;
  u8 *shm_name;
  int dead_region_recovery = 0;
  int time_left;
  struct stat stat;
  struct timespec ts, tsrem;

  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
  ASSERT (a->name);

  shm_name = shm_name_from_svm_map_region_args (a);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] map region %s: shm_open (%s)",
                  getpid (), a->name, shm_name);

  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);

  if (svm_fd >= 0)
    {
      if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
        clib_unix_warning ("segment chmod");
      /* This turns out to fail harmlessly if the client starts first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      vec_free (shm_name);

      if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
        {
          clib_warning ("seek region size");
          close (svm_fd);
          return (0);
        }
      if (write (svm_fd, &junk, 1) != 1)
        {
          clib_warning ("set region size");
          close (svm_fd);
          return (0);
        }

      rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
                 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          clib_unix_warning ("mmap create");
          close (svm_fd);
          return (0);
        }
      close (svm_fd);

      svm_region_init_mapped_region (a, rp);

      return ((void *) rp);
    }
  else
    {
      svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);

      vec_free (shm_name);

      if (svm_fd < 0)
        {
          perror ("svm_region_map(mmap open)");
          return (0);
        }

      /* Reset ownership in case the client started first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      time_left = 20;
      while (1)
        {
          if (0 != fstat (svm_fd, &stat))
            {
              clib_warning ("fstat failed: %d", errno);
              close (svm_fd);
              return (0);
            }
          if (stat.st_size > 0)
            {
              break;
            }
          if (0 == time_left)
            {
              clib_warning ("waiting for resize of shm file timed out");
              close (svm_fd);
              return (0);
            }
          ts.tv_sec = 0;
          ts.tv_nsec = 100000000;
          while (nanosleep (&ts, &tsrem) < 0)
            ts = tsrem;
          time_left--;
        }

      rp = mmap (0, MMAP_PAGESIZE,
                 PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          close (svm_fd);
          clib_warning ("mmap");
          return (0);
        }
      /*
       * We lost the footrace to create this region; make sure
       * the winner has crossed the finish line.
       */
      while (rp->version == 0 && deadman++ < 5)
        {
          sleep (1);
        }

      /*
       * <bleep>-ed?
       */
      if (rp->version == 0)
        {
          clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
          close (svm_fd);
          munmap (rp, a->size);
          return (0);
        }
      /* Remap now that the region has been placed */
      a->baseva = rp->virtual_base;
      a->size = rp->virtual_size;
      munmap (rp, MMAP_PAGESIZE);

      rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
                          PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_FIXED, svm_fd, 0);
      if ((uword) rp == (uword) MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (svm_fd);
          return (0);
        }

      close (svm_fd);

      if ((uword) rp != rp->virtual_base)
        {
          clib_warning ("mmap botch");
        }

      /*
       * Try to fix the region mutex if it is held by
       * a dead process
       */
      pid_holding_region_lock = rp->mutex_owner_pid;
      if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
        {
          clib_warning
            ("region %s mutex held by dead pid %d, tag %d, force unlock",
             rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
          /* owner pid is nonexistent */
          rp->mutex.__data.__owner = 0;
          rp->mutex.__data.__lock = 0;
          dead_region_recovery = 1;
        }

      if (dead_region_recovery)
        clib_warning ("recovery: attempt to re-lock region");

      region_lock (rp, 2);
      oldheap = svm_push_pvt_heap (rp);
      vec_add1 (rp->client_pids, getpid ());

      if (dead_region_recovery)
        clib_warning ("recovery: attempt svm_data_region_map");

      rv = svm_data_region_map (a, rp);
      if (rv)
        {
          clib_warning ("data_region_map: %d", rv);
        }

      if (dead_region_recovery)
        clib_warning ("unlock and continue");

      region_unlock (rp);

      svm_pop_heap (oldheap);

      return ((void *) rp);

    }
  return 0;			/* NOTREACHED */
}

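/*
 * svm_mutex_cleanup
 *
 * atexit() handler: drop any region mutexes this process still holds so
 * other processes are not left blocked behind a dead owner.
 */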
static void
svm_mutex_cleanup (void)
{
  int i;
  for (i = 0; i < nheld; i++)
    {
      pthread_mutex_unlock (mutexes_held[i]);
    }
}

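/*
 * svm_region_init_internal
 *
 * Map the root region (creating it if necessary), randomize its base
 * address slightly, and on first use hang the svm_main_region_t
 * bookkeeping structure (name hash, root path, uid/gid) off its data
 * base. Returns 0 on success, -1 if the root region is already mapped
 * or cannot be mapped.
 */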
static int
svm_region_init_internal (svm_map_region_args_t * a)
{
  svm_region_t *rp;
  u64 ticks = clib_cpu_time_now ();
  uword randomize_baseva;

  /* guard against klutz calls */
  if (root_rp)
    return -1;

  root_rp_refcount++;

  atexit (svm_mutex_cleanup);

  /* Randomize the shared-VM base at init time */
  if (MMAP_PAGESIZE <= (4 << 10))
    randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
  else
    randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;

  a->baseva += randomize_baseva;

  rp = svm_map_region (a);
  if (!rp)
    return -1;

  region_lock (rp, 3);

  /* Set up the main region data structures */
  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
    {
      svm_main_region_t *mp = 0;
      void *oldheap;

      rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);

      oldheap = svm_push_pvt_heap (rp);
      vec_validate (mp, 0);
      mp->name_hash = hash_create_string (0, sizeof (uword));
      mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
      mp->uid = a->uid;
      mp->gid = a->gid;
      rp->data_base = mp;
      svm_pop_heap (oldheap);
    }
  region_unlock (rp);
  root_rp = rp;

  return 0;
}

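/*
 * svm_region_init and friends: public entry points that fill in a
 * svm_map_region_args_t for the global region and call
 * svm_region_init_internal(). A minimal usage sketch, mirroring what
 * svm_client_scan() below does (names and sizes are illustrative only,
 * error handling omitted):
 *
 *   svm_map_region_args_t _a, *a = &_a;
 *   svm_region_t *rp;
 *
 *   svm_region_init_chroot ("/my-root");   // map the root region
 *   clib_memset (a, 0, sizeof (*a));
 *   a->root_path = "/my-root";
 *   a->name = "/my-subregion";
 *   a->size = 2 << 20;
 *   a->flags = SVM_FLAGS_MHEAP;
 *   rp = svm_region_find_or_create (a);
 *   ...
 *   svm_region_unmap (rp);                 // detach / destroy
 *   svm_region_exit ();                    // let go of the root region
 */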
void
svm_region_init (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = 0;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  svm_region_init_internal (a);
}

int
svm_region_init_chroot (const char *root_path)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  return svm_region_init_internal (a);
}

void
svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = uid;
  a->gid = gid;

  svm_region_init_internal (a);
}

void
svm_region_init_args (svm_map_region_args_t * a)
{
  svm_region_init_internal (a);
}

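/*
 * svm_region_find_or_create
 *
 * Map an existing named subregion, or carve space for a new one out of
 * the root region's allocation bitmap and create it. Must be called
 * after one of the svm_region_init variants; the requested size is
 * padded with the region header and private heap and rounded up to a
 * page multiple.
 */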
void *
svm_region_find_or_create (svm_map_region_args_t * a)
{
  svm_main_region_t *mp;
  svm_region_t *rp;
  uword need_nbits;
  int index, i;
  void *oldheap;
  uword *p;
  u8 *name;
  svm_subregion_t *subp;

  ASSERT (root_rp);

  a->size += MMAP_PAGESIZE +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
  a->size = rnd_pagesize (a->size);

  region_lock (root_rp, 4);
  oldheap = svm_push_pvt_heap (root_rp);
  mp = root_rp->data_base;

  ASSERT (mp);

  /* Map the named region from the correct chroot environment */
  if (a->root_path == NULL)
    a->root_path = (char *) mp->root_path;

  /*
   * See if this region is already known. If it is, we're
   * almost done...
   */
  p = hash_get_mem (mp->name_hash, a->name);

  if (p)
    {
      rp = svm_map_region (a);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return rp;
    }

  /* Create the region. */
  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  need_nbits = a->size / MMAP_PAGESIZE;

  index = 1;			/* $$$ fixme, figure out how many bits to really skip */

  /*
   * Scan the virtual space allocation bitmap, looking for a large
   * enough chunk
   */
  do
    {
      if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
        {
          for (i = 0; i < (need_nbits - 1); i++)
            {
              if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
                {
                  index = index + i;
                  goto next;
                }
            }
          break;
        }
      index++;
    next:;
    }
  while (index < root_rp->bitmap_size);

  /* Completely out of VM? */
  if (index >= root_rp->bitmap_size)
    {
      clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
                    root_rp->region_name, a->size, a->size);
      svm_pop_heap (oldheap);
      region_unlock (root_rp);
      return 0;
    }

  /*
   * Mark virtual space allocated
   */
#if CLIB_DEBUG > 1
  clib_warning ("set %d bits at index %d", need_nbits, index);
#endif

  for (i = 0; i < need_nbits; i++)
    {
      clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
    }

  /* Place this region where it goes... */
  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;

  rp = svm_map_region (a);

  pool_get (mp->subregions, subp);
  name = format (0, "%s%c", a->name, 0);
  subp->subregion_name = name;

  hash_set_mem (mp->name_hash, name, subp - mp->subregions);

  svm_pop_heap (oldheap);

  region_unlock (root_rp);

  return (rp);
}

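/*
 * svm_region_unlink
 *
 * shm_unlink() the backing segment of a region so a later create starts
 * from scratch. The mapping itself is left untouched.
 */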
void
svm_region_unlink (svm_region_t * rp)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_main_region_t *mp;
  u8 *shm_name;

  ASSERT (root_rp);
  ASSERT (rp);
  ASSERT (vec_c_string_is_terminated (rp->region_name));

  mp = root_rp->data_base;
  ASSERT (mp);

  a->root_path = (char *) mp->root_path;
  a->name = rp->region_name;
  shm_name = shm_name_from_svm_map_region_args (a);
  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
  shm_unlink ((const char *) shm_name);
  vec_free (shm_name);
}

/*
 * svm_region_unmap
 *
 * Let go of the indicated region. If the calling process
 * is the last customer, throw it away completely.
 * The root region mutex guarantees atomicity with respect to
 * a new region client showing up at the wrong moment.
 */
void
svm_region_unmap_internal (void *rp_arg, u8 is_client)
{
  int i, mypid = getpid ();
  int nclients_left;
  void *oldheap;
  uword virtual_base, virtual_size;
  svm_region_t *rp = rp_arg;
  char *name;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  ASSERT (rp);
  ASSERT (root_rp);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);

  region_lock (root_rp, 5);
  region_lock (rp, 6);

  oldheap = svm_push_pvt_heap (rp);	/* nb vec_delete() in the loop */

  /* Remove the caller from the list of mappers */
  for (i = 0; i < vec_len (rp->client_pids); i++)
    {
      if (rp->client_pids[i] == mypid)
        {
          vec_delete (rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  svm_pop_heap (oldheap);

  nclients_left = vec_len (rp->client_pids);
  virtual_base = rp->virtual_base;
  virtual_size = rp->virtual_size;

  if (nclients_left == 0)
    {
      int index, nbits, i;
      svm_main_region_t *mp;
      uword *p;
      svm_subregion_t *subp;

      /* Kill the region, last guy on his way out */

      oldheap = svm_push_pvt_heap (root_rp);
      name = vec_dup (rp->region_name);

      virtual_base = rp->virtual_base;
      virtual_size = rp->virtual_size;

      /* Figure out which bits to clear in the root region bitmap */
      index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;

      nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;

#if CLIB_DEBUG > 1
      clib_warning ("clear %d bits at index %d", nbits, index);
#endif
      /* Give back the allocated VM */
      for (i = 0; i < nbits; i++)
        {
          clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
        }

      mp = root_rp->data_base;

      p = hash_get_mem (mp->name_hash, name);

      /* Better never happen ... */
      if (p == NULL)
        {
          region_unlock (rp);
          region_unlock (root_rp);
          svm_pop_heap (oldheap);
          clib_warning ("Region name '%s' not found?", name);
          return;
        }

      /* Remove from the root region subregion pool */
      subp = mp->subregions + p[0];
      pool_put (mp->subregions, subp);

      hash_unset_mem (mp->name_hash, name);

      vec_free (name);

      region_unlock (rp);

      /* If a client asks for the cleanup, don't unlink the backing
       * file since we can't tell if it has been recreated. */
      if (!is_client)
        svm_region_unlink (rp);

      munmap ((void *) virtual_base, virtual_size);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return;
    }

  region_unlock (rp);
  region_unlock (root_rp);

  munmap ((void *) virtual_base, virtual_size);
}

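/*
 * Public wrappers: svm_region_unmap() also unlinks the backing file when
 * the last mapper leaves; svm_region_unmap_client() never unlinks it.
 */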
void
svm_region_unmap (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
}

void
svm_region_unmap_client (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
}

/*
 * svm_region_exit
 *
 * Detach this process from the root region once the refcount drops to
 * zero; the server-side variant also unlinks the backing segment when
 * no clients remain.
 */
static void
svm_region_exit_internal (u8 is_client)
{
  void *oldheap;
  int i, mypid = getpid ();
  uword virtual_base, virtual_size;

  /* It felt so nice we did it twice... */
  if (root_rp == 0)
    return;

  if (--root_rp_refcount > 0)
    return;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  region_lock (root_rp, 7);
  oldheap = svm_push_pvt_heap (root_rp);

  virtual_base = root_rp->virtual_base;
  virtual_size = root_rp->virtual_size;

  for (i = 0; i < vec_len (root_rp->client_pids); i++)
    {
      if (root_rp->client_pids[i] == mypid)
        {
          vec_delete (root_rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  if (!is_client && vec_len (root_rp->client_pids) == 0)
    svm_region_unlink (root_rp);

  region_unlock (root_rp);
  svm_pop_heap (oldheap);

  root_rp = 0;
  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_exit (void)
{
  svm_region_exit_internal (0 /* is_client */ );
}

void
svm_region_exit_client (void)
{
  svm_region_exit_internal (1 /* is_client */ );
}

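/*
 * svm_client_scan_this_region_nolock
 *
 * Walk a region's client pid vector and drop entries whose processes no
 * longer exist (kill (pid, 0) fails). Caller must hold the region mutex.
 */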
void
svm_client_scan_this_region_nolock (svm_region_t * rp)
{
  int j;
  int mypid = getpid ();
  void *oldheap;

  for (j = 0; j < vec_len (rp->client_pids); j++)
    {
      if (mypid == rp->client_pids[j])
        continue;
      if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        rp->region_name, rp->client_pids[j]);
          /* nb: client vec in rp->region_heap */
          oldheap = svm_push_pvt_heap (rp);
          vec_delete (rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }
}


/*
 * Scan svm regions for dead clients
 */
void
svm_client_scan (const char *root_path)
{
  int i, j;
  svm_main_region_t *mp;
  svm_map_region_args_t *a = 0;
  svm_region_t *root_rp;
  svm_region_t *rp;
  svm_subregion_t *subp;
  u8 *name = 0;
  u8 **svm_names = 0;
  void *oldheap;
  int mypid = getpid ();

  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  root_rp = svm_get_root_rp ();

  pthread_mutex_lock (&root_rp->mutex);

  mp = root_rp->data_base;

  for (j = 0; j < vec_len (root_rp->client_pids); j++)
    {
      if (mypid == root_rp->client_pids[j])
        continue;
      if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        root_rp->region_name, root_rp->client_pids[j]);
          /* nb: client vec in root_rp->region_heap */
          oldheap = svm_push_pvt_heap (root_rp);
          vec_delete (root_rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }

  /*
   * Snapshot names; can't hold the root rp mutex across
   * find_or_create.
   */
  /* *INDENT-OFF* */
  pool_foreach (subp, mp->subregions, ({
    name = vec_dup (subp->subregion_name);
    vec_add1 (svm_names, name);
  }));
  /* *INDENT-ON* */

  pthread_mutex_unlock (&root_rp->mutex);

  for (i = 0; i < vec_len (svm_names); i++)
    {
      vec_validate (a, 0);
      a->root_path = root_path;
      a->name = (char *) svm_names[i];
      rp = svm_region_find_or_create (a);
      if (rp)
        {
          pthread_mutex_lock (&rp->mutex);

          svm_client_scan_this_region_nolock (rp);

          pthread_mutex_unlock (&rp->mutex);
          svm_region_unmap (rp);
          vec_free (svm_names[i]);
        }
      vec_free (a);
    }
  vec_free (svm_names);

  svm_region_exit ();

  vec_free (a);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */