/*
 *------------------------------------------------------------------
 * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
 * library
 *
 * Copyright (c) 2009 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <string.h>
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/hash.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>

#include "svm.h"

static svm_region_t *root_rp;
static int root_rp_refcount;

#define MAXLOCK 2
static pthread_mutex_t *mutexes_held[MAXLOCK];
static int nheld;

svm_region_t *
svm_get_root_rp (void)
{
  return root_rp;
}

#define MUTEX_DEBUG

u64
svm_get_global_region_base_va ()
{
#ifdef CLIB_SANITIZE_ADDR
  return 0x200000000000;
#endif

#if __aarch64__
  /* On AArch64 the VA space can have different sizes, from 36 to 48 bits.
     Try to detect the number of VA bits by parsing the address ranges in
     /proc/self/maps. */
  int fd;
  unformat_input_t input;
  u64 start, end = 0;
  u8 bits = 0;

  if ((fd = open ("/proc/self/maps", 0)) < 0)
    clib_unix_error ("open '/proc/self/maps'");

  unformat_init_clib_file (&input, fd);
  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "%llx-%llx", &start, &end))
        end--;
      unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);

  bits = count_leading_zeros (end);
  bits = 64 - bits;
  if (bits >= 36 && bits <= 48)
    return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
  else
    clib_unix_error ("unexpected va bits '%u'", bits);
#endif

  /* default value */
  return 0x130000000ULL;
}
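
/*
 * Worked example of the AArch64 branch above (numbers are illustrative,
 * not taken from a real system): if the end address of the last mapping
 * in /proc/self/maps is 0xffffaaaabbbb, count_leading_zeros () on that
 * 64-bit value returns 16, so bits = 64 - 16 = 48.  The returned base
 * is then (1ul << 48) / 4 - 2 * SVM_GLOBAL_REGION_SIZE, i.e. one quarter
 * of the way into the usable VA space, backed off by two global-region
 * lengths.
 */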

static void
region_lock (svm_region_t * rp, int tag)
{
  pthread_mutex_lock (&rp->mutex);
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = getpid ();
  rp->mutex_owner_tag = tag;
#endif
  ASSERT (nheld < MAXLOCK);
  /*
   * Keep score of held mutexes so we can try to exit
   * cleanly if the world comes to an end at the worst possible
   * moment
   */
  mutexes_held[nheld++] = &rp->mutex;
}

static void
region_unlock (svm_region_t * rp)
{
  int i, j;
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = 0;
  rp->mutex_owner_tag = 0;
#endif

  for (i = nheld - 1; i >= 0; i--)
    {
      if (mutexes_held[i] == &rp->mutex)
        {
          for (j = i; j < MAXLOCK - 1; j++)
            mutexes_held[j] = mutexes_held[j + 1];
          nheld--;
          goto found;
        }
    }
  ASSERT (0);

found:
  CLIB_MEMORY_BARRIER ();
  pthread_mutex_unlock (&rp->mutex);
}


static u8 *
format_svm_flags (u8 * s, va_list * args)
{
  uword f = va_arg (*args, uword);

  if (f & SVM_FLAGS_MHEAP)
    s = format (s, "MHEAP ");
  if (f & SVM_FLAGS_FILE)
    s = format (s, "FILE ");
  if (f & SVM_FLAGS_NODATA)
    s = format (s, "NODATA ");
  if (f & SVM_FLAGS_NEED_DATA_INIT)
    s = format (s, "INIT ");

  return (s);
}

static u8 *
format_svm_size (u8 * s, va_list * args)
{
  uword size = va_arg (*args, uword);

  if (size >= (1 << 20))
    {
      s = format (s, "(%d mb)", size >> 20);
    }
  else if (size >= (1 << 10))
    {
      s = format (s, "(%d kb)", size >> 10);
    }
  else
    {
      s = format (s, "(%d bytes)", size);
    }
  return (s);
}

u8 *
format_svm_region (u8 * s, va_list * args)
{
  svm_region_t *rp = va_arg (*args, svm_region_t *);
  int verbose = va_arg (*args, int);
  int i;
  uword lo, hi;

  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
              rp->region_name, rp->virtual_base,
              rp->virtual_size, format_svm_size, rp->virtual_size);
  s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
              rp->user_ctx, rp->bitmap_size);

  if (verbose)
    {
      s = format (s, " flags: 0x%x %U\n", rp->flags,
                  format_svm_flags, rp->flags);
      s = format (s,
                  " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
                  rp->region_heap, rp->data_base, rp->data_heap);
    }

  s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));

  for (i = 0; i < vec_len (rp->client_pids); i++)
    s = format (s, "%d ", rp->client_pids[i]);

  s = format (s, "\n");

  if (verbose)
    {
      lo = hi = ~0;

      s = format (s, " VM in use: ");

      for (i = 0; i < rp->bitmap_size; i++)
        {
          if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
            {
              if (lo == ~0)
                {
                  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
                }
              else
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE;
                }
            }
          else
            {
              if (lo != ~0)
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
                  s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,
                              (hi - lo) >> 10);
                  lo = hi = ~0;
                }
            }
        }
#if USE_DLMALLOC == 0
      s = format (s, " rgn heap stats: %U", format_mheap,
                  rp->region_heap, 0);
      if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
        {
          s = format (s, "\n data heap stats: %U", format_mheap,
                      rp->data_heap, 1);
        }
      s = format (s, "\n");
#endif
    }

  return (s);
}

/*
 * rnd_pagesize
 * Round to a pagesize multiple, presumably 4k works
 */
static u64
rnd_pagesize (u64 size)
{
  u64 rv;

  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
  return (rv);
}
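
/*
 * Example, assuming a 4 KiB MMAP_PAGESIZE: rnd_pagesize (1) and
 * rnd_pagesize (4096) both return 4096, rnd_pagesize (4097) returns
 * 8192, and rnd_pagesize (0) returns 0.
 */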

/*
 * svm_data_region_create
 */
static int
svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;

  map_size = rp->virtual_size - (MMAP_PAGESIZE +
                                 (a->pvt_heap_size ? a->pvt_heap_size :
                                  SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      struct stat statb;

      fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size -
              (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
      rp->backing_file = (char *) format (0, "%s%c", a->backing_file, 0);
      rp->flags |= SVM_FLAGS_FILE;
    }

  if (a->flags & SVM_FLAGS_MHEAP)
    {
#if USE_DLMALLOC == 0
      mheap_t *heap_header;
      rp->data_heap =
        mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
                                MHEAP_FLAG_DISABLE_VM);
      heap_header = mheap_header (rp->data_heap);
      heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
#else
      rp->data_heap = create_mspace_with_base (rp->data_base,
                                               map_size, 1 /* locked */ );
      mspace_disable_expand (rp->data_heap);
#endif

      rp->flags |= SVM_FLAGS_MHEAP;
    }
  return 0;
}

static int
svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;
  struct stat statb;

  map_size = rp->virtual_size -
    (MMAP_PAGESIZE
     + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      fd = open (a->backing_file, O_RDWR, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size
              - (MMAP_PAGESIZE
                 + (a->pvt_heap_size ? a->pvt_heap_size :
                    SVM_PVT_MHEAP_SIZE)));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
    }
  return 0;
}

u8 *
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
{
  u8 *shm_name;
  int root_path_offset = 0;
  int name_offset = 0;

  if (a->root_path)
    {
      /* Tolerate present or absent slashes */
      if (a->root_path[0] == '/')
        root_path_offset++;

      if (a->name[0] == '/')
        name_offset = 1;

      shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
                         &a->name[name_offset], 0);
    }
  else
    shm_name = format (0, "%s%c", a->name, 0);
  return (shm_name);
}
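
/*
 * Example of the name mangling above (values are purely illustrative):
 * root_path "/mydir" (or "mydir") combined with name "/my-region"
 * (or "my-region") yields the shm name "/mydir-my-region"; with no
 * root_path, the name is used as-is.
 */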

void
svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
{
  pthread_mutexattr_t attr;
  pthread_condattr_t cattr;
  int nbits, words, bit;
  int overhead_space;
  void *oldheap;
  uword data_base;
  ASSERT (rp);
  int rv;

  clib_memset (rp, 0, sizeof (*rp));

  if (pthread_mutexattr_init (&attr))
    clib_unix_warning ("mutexattr_init");

  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("mutexattr_setpshared");

  if (pthread_mutex_init (&rp->mutex, &attr))
    clib_unix_warning ("mutex_init");

  if (pthread_mutexattr_destroy (&attr))
    clib_unix_warning ("mutexattr_destroy");

  if (pthread_condattr_init (&cattr))
    clib_unix_warning ("condattr_init");

  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("condattr_setpshared");

  if (pthread_cond_init (&rp->condvar, &cattr))
    clib_unix_warning ("cond_init");

  if (pthread_condattr_destroy (&cattr))
    clib_unix_warning ("condattr_destroy");

  region_lock (rp, 1);

  rp->virtual_base = a->baseva;
  rp->virtual_size = a->size;

#if USE_DLMALLOC == 0
  rp->region_heap =
    mheap_alloc_with_flags (uword_to_pointer
                            (a->baseva + MMAP_PAGESIZE, void *),
                            (a->pvt_heap_size !=
                             0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
                            MHEAP_FLAG_DISABLE_VM);
#else
  rp->region_heap = create_mspace_with_base
    (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
     (a->pvt_heap_size !=
      0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ );

  mspace_disable_expand (rp->region_heap);
#endif

  oldheap = svm_push_pvt_heap (rp);

  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
  vec_add1 (rp->client_pids, getpid ());

  nbits = rp->virtual_size / MMAP_PAGESIZE;

  ASSERT (nbits > 0);
  rp->bitmap_size = nbits;
  words = (nbits + BITS (uword) - 1) / BITS (uword);
  vec_validate (rp->bitmap, words - 1);

  overhead_space = MMAP_PAGESIZE /* header */  +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);

  bit = 0;
  data_base = (uword) rp->virtual_base;

  if (a->flags & SVM_FLAGS_NODATA)
    rp->flags |= SVM_FLAGS_NEED_DATA_INIT;

  do
    {
      clib_bitmap_set_no_check (rp->bitmap, bit, 1);
      bit++;
      overhead_space -= MMAP_PAGESIZE;
      data_base += MMAP_PAGESIZE;
    }
  while (overhead_space > 0);

  rp->data_base = (void *) data_base;

  /*
   * Note: although the POSIX spec guarantees that only one
   * process enters this block, we have to play games
   * to hold off clients until e.g. the mutex is ready
   */
  rp->version = SVM_VERSION;

  /* setup the data portion of the region */

  rv = svm_data_region_create (a, rp);
  if (rv)
    {
      clib_warning ("data_region_create: %d", rv);
    }

  region_unlock (rp);

  svm_pop_heap (oldheap);
}
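
/*
 * Resulting region layout, as a sketch (actual sizes depend on the
 * mapping args):
 *
 *   virtual_base  -> +--------------------------------------+
 *                    | region header (one mmap page)        |
 *                    +--------------------------------------+
 *                    | private (region) heap:               |
 *                    | pvt_heap_size or SVM_PVT_MHEAP_SIZE  |
 *   data_base     -> +--------------------------------------+
 *                    | data portion, set up by              |
 *                    | svm_data_region_create ()            |
 *                    +--------------------------------------+
 *                      ... up to virtual_base + virtual_size
 */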

/*
 * svm_map_region
 */
void *
svm_map_region (svm_map_region_args_t * a)
{
  int svm_fd;
  svm_region_t *rp;
  int deadman = 0;
  u8 junk = 0;
  void *oldheap;
  int rv;
  int pid_holding_region_lock;
  u8 *shm_name;
  int dead_region_recovery = 0;
  int time_left;
  struct stat stat;
  struct timespec ts, tsrem;

  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
  ASSERT (a->name);

  shm_name = shm_name_from_svm_map_region_args (a);

  if (1 || CLIB_DEBUG > 1)
    clib_warning ("[%d] map region %s: shm_open (%s)",
                  getpid (), a->name, shm_name);

  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);

  if (svm_fd >= 0)
    {
      if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
        clib_unix_warning ("segment chmod");
      /* This turns out to fail harmlessly if the client starts first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      vec_free (shm_name);

      if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
        {
          clib_warning ("seek region size");
          close (svm_fd);
          return (0);
        }
      if (write (svm_fd, &junk, 1) != 1)
        {
          clib_warning ("set region size");
          close (svm_fd);
          return (0);
        }

      rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
                 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          clib_unix_warning ("mmap create");
          close (svm_fd);
          return (0);
        }
      close (svm_fd);

      svm_region_init_mapped_region (a, rp);

      return ((void *) rp);
    }
  else
    {
      svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);

      vec_free (shm_name);

      if (svm_fd < 0)
        {
          perror ("svm_region_map(mmap open)");
          return (0);
        }

      /* Reset ownership in case the client started first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      time_left = 20;
      while (1)
        {
          if (0 != fstat (svm_fd, &stat))
            {
              clib_warning ("fstat failed: %d", errno);
              close (svm_fd);
              return (0);
            }
          if (stat.st_size > 0)
            {
              break;
            }
          if (0 == time_left)
            {
              clib_warning ("waiting for resize of shm file timed out");
              close (svm_fd);
              return (0);
            }
          ts.tv_sec = 0;
          ts.tv_nsec = 100000000;
          while (nanosleep (&ts, &tsrem) < 0)
            ts = tsrem;
          time_left--;
        }

      rp = mmap (0, MMAP_PAGESIZE,
                 PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          close (svm_fd);
          clib_warning ("mmap");
          return (0);
        }
      /*
       * We lost the footrace to create this region; make sure
       * the winner has crossed the finish line.
       */
      while (rp->version == 0 && deadman++ < 5)
        {
          sleep (1);
        }

      /*
       * <bleep>-ed?
       */
      if (rp->version == 0)
        {
          clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
          close (svm_fd);
          munmap (rp, a->size);
          return (0);
        }
      /* Remap now that the region has been placed */
      a->baseva = rp->virtual_base;
      a->size = rp->virtual_size;
      munmap (rp, MMAP_PAGESIZE);

      rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
                          PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_FIXED, svm_fd, 0);
      if ((uword) rp == (uword) MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (svm_fd);
          return (0);
        }

      close (svm_fd);

      if ((uword) rp != rp->virtual_base)
        {
          clib_warning ("mmap botch");
        }

      /*
       * Try to fix the region mutex if it is held by
       * a dead process
       */
      pid_holding_region_lock = rp->mutex_owner_pid;
      if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
        {
          clib_warning
            ("region %s mutex held by dead pid %d, tag %d, force unlock",
             rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
          /* owner pid is nonexistent */
          rp->mutex.__data.__owner = 0;
          rp->mutex.__data.__lock = 0;
          dead_region_recovery = 1;
        }

      if (dead_region_recovery)
        clib_warning ("recovery: attempt to re-lock region");

      region_lock (rp, 2);
      oldheap = svm_push_pvt_heap (rp);
      vec_add1 (rp->client_pids, getpid ());

      if (dead_region_recovery)
        clib_warning ("recovery: attempt svm_data_region_map");

      rv = svm_data_region_map (a, rp);
      if (rv)
        {
          clib_warning ("data_region_map: %d", rv);
        }

      if (dead_region_recovery)
        clib_warning ("unlock and continue");

      region_unlock (rp);

      svm_pop_heap (oldheap);

      return ((void *) rp);
    }
  return 0;			/* NOTREACHED */
}
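
/*
 * Minimal usage sketch (the region name and size below are assumptions
 * for illustration; they are not values this library mandates):
 *
 *   svm_map_region_args_t _a, *a = &_a;
 *   svm_region_t *rp;
 *
 *   clib_memset (a, 0, sizeof (*a));
 *   a->name = "/my-region";
 *   a->baseva = svm_get_global_region_base_va ();
 *   a->size = 64 << 20;
 *   a->flags = SVM_FLAGS_NODATA;
 *   rp = svm_map_region (a);
 *
 * The first caller creates and initializes the segment; later callers
 * attach to the existing segment and wait for rp->version to become
 * nonzero before remapping it at its published base address.
 */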

static void
svm_mutex_cleanup (void)
{
  int i;
  for (i = 0; i < nheld; i++)
    {
      pthread_mutex_unlock (mutexes_held[i]);
    }
}

static int
svm_region_init_internal (svm_map_region_args_t * a)
{
  svm_region_t *rp;
  u64 ticks = clib_cpu_time_now ();
  uword randomize_baseva;

  /* guard against klutz calls */
  if (root_rp)
    return -1;

  root_rp_refcount++;

  atexit (svm_mutex_cleanup);

  /* Randomize the shared-VM base at init time */
  if (MMAP_PAGESIZE <= (4 << 10))
    randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
  else
    randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;

  a->baseva += randomize_baseva;

  rp = svm_map_region (a);
  if (!rp)
    return -1;

  region_lock (rp, 3);

  /* Set up the main region data structures */
  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
    {
      svm_main_region_t *mp = 0;
      void *oldheap;

      rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);

      oldheap = svm_push_pvt_heap (rp);
      vec_validate (mp, 0);
      mp->name_hash = hash_create_string (0, sizeof (uword));
      mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
      mp->uid = a->uid;
      mp->gid = a->gid;
      rp->data_base = mp;
      svm_pop_heap (oldheap);
    }
  region_unlock (rp);
  root_rp = rp;

  return 0;
}

void
svm_region_init (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = 0;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  svm_region_init_internal (a);
}

int
svm_region_init_chroot (const char *root_path)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  return svm_region_init_internal (a);
}

void
svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = uid;
  a->gid = gid;

  svm_region_init_internal (a);
}

void
svm_region_init_args (svm_map_region_args_t * a)
{
  svm_region_init_internal (a);
}
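
/*
 * Typical bring-up sketch for a chroot'ed deployment (the root path is
 * an assumption for illustration):
 *
 *   if (svm_region_init_chroot ("/my-root") < 0)
 *     clib_warning ("global region init failed");
 *
 * svm_region_init () does the same thing with the default global region
 * name, base VA and uid/gid 0, and svm_region_init_args () lets the
 * caller fill in svm_map_region_args_t directly.
 */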

void *
svm_region_find_or_create (svm_map_region_args_t * a)
{
  svm_main_region_t *mp;
  svm_region_t *rp;
  uword need_nbits;
  int index, i;
  void *oldheap;
  uword *p;
  u8 *name;
  svm_subregion_t *subp;

  ASSERT (root_rp);

  a->size += MMAP_PAGESIZE +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
  a->size = rnd_pagesize (a->size);

  region_lock (root_rp, 4);
  oldheap = svm_push_pvt_heap (root_rp);
  mp = root_rp->data_base;

  ASSERT (mp);

  /* Map the named region from the correct chroot environment */
  if (a->root_path == NULL)
    a->root_path = (char *) mp->root_path;

  /*
   * See if this region is already known. If it is, we're
   * almost done...
   */
  p = hash_get_mem (mp->name_hash, a->name);

  if (p)
    {
      rp = svm_map_region (a);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return rp;
    }

  /* Create the region. */
  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  need_nbits = a->size / MMAP_PAGESIZE;

  index = 1;		/* $$$ fixme, figure out how many bits to really skip */

  /*
   * Scan the virtual space allocation bitmap, looking for a large
   * enough chunk
   */
  do
    {
      if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
        {
          for (i = 0; i < (need_nbits - 1); i++)
            {
              if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
                {
                  index = index + i;
                  goto next;
                }
            }
          break;
        }
      index++;
    next:;
    }
  while (index < root_rp->bitmap_size);

  /* Completely out of VM? */
  if (index >= root_rp->bitmap_size)
    {
      clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
                    root_rp->region_name, a->size, a->size);
      svm_pop_heap (oldheap);
      region_unlock (root_rp);
      return 0;
    }

  /*
   * Mark virtual space allocated
   */
#if CLIB_DEBUG > 1
  clib_warning ("set %d bits at index %d", need_nbits, index);
#endif

  for (i = 0; i < need_nbits; i++)
    {
      clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
    }

  /* Place this region where it goes... */
  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;

  rp = svm_map_region (a);

  pool_get (mp->subregions, subp);
  name = format (0, "%s%c", a->name, 0);
  subp->subregion_name = name;

  hash_set_mem (mp->name_hash, name, subp - mp->subregions);

  svm_pop_heap (oldheap);

  region_unlock (root_rp);

  return (rp);
}
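
/*
 * Subregion creation sketch (name, size and flags are illustrative
 * assumptions): the global region must already be mapped, and the
 * requested size is grown by the header page and private heap before
 * rounding, so pass only the data-space size actually needed.
 *
 *   svm_map_region_args_t _a, *a = &_a;
 *   svm_region_t *rp;
 *
 *   clib_memset (a, 0, sizeof (*a));
 *   a->name = "/my-subregion";
 *   a->size = 2 << 20;
 *   a->flags = SVM_FLAGS_MHEAP;
 *   rp = svm_region_find_or_create (a);
 */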

void
svm_region_unlink (svm_region_t * rp)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_main_region_t *mp;
  u8 *shm_name;

  ASSERT (root_rp);
  ASSERT (rp);
  ASSERT (vec_c_string_is_terminated (rp->region_name));

  mp = root_rp->data_base;
  ASSERT (mp);

  a->root_path = (char *) mp->root_path;
  a->name = rp->region_name;
  shm_name = shm_name_from_svm_map_region_args (a);
  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
  shm_unlink ((const char *) shm_name);
  vec_free (shm_name);
}

/*
 * svm_region_unmap
 *
 * Let go of the indicated region. If the calling process
 * is the last customer, throw it away completely.
 * The root region mutex guarantees atomicity with respect to
 * a new region client showing up at the wrong moment.
 */
void
svm_region_unmap_internal (void *rp_arg, u8 is_client)
{
  int i, mypid = getpid ();
  int nclients_left;
  void *oldheap;
  uword virtual_base, virtual_size;
  svm_region_t *rp = rp_arg;
  char *name;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  ASSERT (rp);
  ASSERT (root_rp);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);

  region_lock (root_rp, 5);
  region_lock (rp, 6);

  oldheap = svm_push_pvt_heap (rp);	/* nb vec_delete() in the loop */

  /* Remove the caller from the list of mappers */
  for (i = 0; i < vec_len (rp->client_pids); i++)
    {
      if (rp->client_pids[i] == mypid)
        {
          vec_delete (rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  svm_pop_heap (oldheap);

  nclients_left = vec_len (rp->client_pids);
  virtual_base = rp->virtual_base;
  virtual_size = rp->virtual_size;

  if (nclients_left == 0)
    {
      int index, nbits, i;
      svm_main_region_t *mp;
      uword *p;
      svm_subregion_t *subp;

      /* Kill the region, last guy on his way out */

      oldheap = svm_push_pvt_heap (root_rp);
      name = vec_dup (rp->region_name);

      virtual_base = rp->virtual_base;
      virtual_size = rp->virtual_size;

      /* Figure out which bits to clear in the root region bitmap */
      index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;

      nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;

#if CLIB_DEBUG > 1
      clib_warning ("clear %d bits at index %d", nbits, index);
#endif
      /* Give back the allocated VM */
      for (i = 0; i < nbits; i++)
        {
          clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
        }

      mp = root_rp->data_base;

      p = hash_get_mem (mp->name_hash, name);

      /* Better never happen ... */
      if (p == NULL)
        {
          region_unlock (rp);
          region_unlock (root_rp);
          svm_pop_heap (oldheap);
          clib_warning ("Region name '%s' not found?", name);
          return;
        }

      /* Remove from the root region subregion pool */
      subp = mp->subregions + p[0];
      pool_put (mp->subregions, subp);

      hash_unset_mem (mp->name_hash, name);

      vec_free (name);

      region_unlock (rp);

      /* If a client asks for the cleanup, don't unlink the backing
       * file since we can't tell if it has been recreated. */
      if (!is_client)
        svm_region_unlink (rp);

      munmap ((void *) virtual_base, virtual_size);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return;
    }

  region_unlock (rp);
  region_unlock (root_rp);

  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_unmap (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
}

void
svm_region_unmap_client (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
}

/*
 * svm_region_exit
 */
static void
svm_region_exit_internal (u8 is_client)
{
  void *oldheap;
  int i, mypid = getpid ();
  uword virtual_base, virtual_size;

  /* It felt so nice we did it twice... */
  if (root_rp == 0)
    return;

  if (--root_rp_refcount > 0)
    return;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  region_lock (root_rp, 7);
  oldheap = svm_push_pvt_heap (root_rp);

  virtual_base = root_rp->virtual_base;
  virtual_size = root_rp->virtual_size;

  for (i = 0; i < vec_len (root_rp->client_pids); i++)
    {
      if (root_rp->client_pids[i] == mypid)
        {
          vec_delete (root_rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  if (!is_client && vec_len (root_rp->client_pids) == 0)
    svm_region_unlink (root_rp);

  region_unlock (root_rp);
  svm_pop_heap (oldheap);

  root_rp = 0;
  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_exit (void)
{
  svm_region_exit_internal (0 /* is_client */ );
}

void
svm_region_exit_client (void)
{
  svm_region_exit_internal (1 /* is_client */ );
}

void
svm_client_scan_this_region_nolock (svm_region_t * rp)
{
  int j;
  int mypid = getpid ();
  void *oldheap;

  for (j = 0; j < vec_len (rp->client_pids); j++)
    {
      if (mypid == rp->client_pids[j])
        continue;
      if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        rp->region_name, rp->client_pids[j]);
          /* nb: client vec in rp->region_heap */
          oldheap = svm_push_pvt_heap (rp);
          vec_delete (rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }
}


/*
 * Scan svm regions for dead clients
 */
void
svm_client_scan (const char *root_path)
{
  int i, j;
  svm_main_region_t *mp;
  svm_map_region_args_t *a = 0;
  svm_region_t *root_rp;
  svm_region_t *rp;
  svm_subregion_t *subp;
  u8 *name = 0;
  u8 **svm_names = 0;
  void *oldheap;
  int mypid = getpid ();

  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  root_rp = svm_get_root_rp ();

  pthread_mutex_lock (&root_rp->mutex);

  mp = root_rp->data_base;

  for (j = 0; j < vec_len (root_rp->client_pids); j++)
    {
      if (mypid == root_rp->client_pids[j])
        continue;
      if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        root_rp->region_name, root_rp->client_pids[j]);
          /* nb: client vec in root_rp->region_heap */
          oldheap = svm_push_pvt_heap (root_rp);
          vec_delete (root_rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }

  /*
   * Snapshot the names; we can't hold the root rp mutex across
   * find_or_create.
   */
  /* *INDENT-OFF* */
  pool_foreach (subp, mp->subregions, ({
        name = vec_dup (subp->subregion_name);
        vec_add1(svm_names, name);
      }));
  /* *INDENT-ON* */

  pthread_mutex_unlock (&root_rp->mutex);

  for (i = 0; i < vec_len (svm_names); i++)
    {
      vec_validate (a, 0);
      a->root_path = root_path;
      a->name = (char *) svm_names[i];
      rp = svm_region_find_or_create (a);
      if (rp)
        {
          pthread_mutex_lock (&rp->mutex);

          svm_client_scan_this_region_nolock (rp);

          pthread_mutex_unlock (&rp->mutex);
          svm_region_unmap (rp);
          vec_free (svm_names[i]);
        }
      vec_free (a);
    }
  vec_free (svm_names);

  svm_region_exit ();

  vec_free (a);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */