/*
 *------------------------------------------------------------------
 * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
 * library
 *
 * Copyright (c) 2009 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <string.h>
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/hash.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>

#include "svm.h"

static svm_region_t *root_rp;
static int root_rp_refcount;

#define MAXLOCK 2
static pthread_mutex_t *mutexes_held[MAXLOCK];
static int nheld;

svm_region_t *
svm_get_root_rp (void)
{
  return root_rp;
}

#define MUTEX_DEBUG

u64
svm_get_global_region_base_va ()
{
#ifdef CLIB_SANITIZE_ADDR
  return 0x200000000000;
#endif

#if __aarch64__
  /* On AArch64 the VA space can have different sizes, from 36 to 48 bits.
     Here we try to detect the number of VA bits by parsing the address
     ranges in /proc/self/maps */
  int fd;
  unformat_input_t input;
  u64 start, end = 0;
  u8 bits = 0;

  if ((fd = open ("/proc/self/maps", 0)) < 0)
    clib_unix_error ("open '/proc/self/maps'");

  unformat_init_clib_file (&input, fd);
  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "%llx-%llx", &start, &end))
        end--;
      unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);

  bits = count_leading_zeros (end);
  bits = 64 - bits;
  if (bits >= 36 && bits <= 48)
    return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
  else
    clib_unix_error ("unexpected va bits '%u'", bits);
#endif

  /* default value */
  return 0x130000000ULL;
}
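
/*
 * Worked example (illustrative, not tied to a particular platform): if the
 * highest mapping in /proc/self/maps ends just below 1 << 48, then
 * count_leading_zeros (end) == 16, bits == 48, and the base VA returned is
 * ((1ul << 48) / 4) - (2 * SVM_GLOBAL_REGION_SIZE), i.e. one quarter of the
 * way into the VA space, backed off by two global-region sizes.
 */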

static void
region_lock (svm_region_t * rp, int tag)
{
  pthread_mutex_lock (&rp->mutex);
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = getpid ();
  rp->mutex_owner_tag = tag;
#endif
  ASSERT (nheld < MAXLOCK);	//NOSONAR
  /*
   * Keep score of held mutexes so we can try to exit
   * cleanly if the world comes to an end at the worst possible
   * moment
   */
  mutexes_held[nheld++] = &rp->mutex;
}

static void
region_unlock (svm_region_t * rp)
{
  int i, j;
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = 0;
  rp->mutex_owner_tag = 0;
#endif

  for (i = nheld - 1; i >= 0; i--)
    {
      if (mutexes_held[i] == &rp->mutex)
        {
          for (j = i; j < MAXLOCK - 1; j++)
            mutexes_held[j] = mutexes_held[j + 1];
          nheld--;
          goto found;
        }
    }
  ASSERT (0);

found:
  CLIB_MEMORY_BARRIER ();
  pthread_mutex_unlock (&rp->mutex);
}


static u8 *
format_svm_flags (u8 * s, va_list * args)
{
  uword f = va_arg (*args, uword);

  if (f & SVM_FLAGS_MHEAP)
    s = format (s, "MHEAP ");
  if (f & SVM_FLAGS_FILE)
    s = format (s, "FILE ");
  if (f & SVM_FLAGS_NODATA)
    s = format (s, "NODATA ");
  if (f & SVM_FLAGS_NEED_DATA_INIT)
    s = format (s, "INIT ");

  return (s);
}

static u8 *
format_svm_size (u8 * s, va_list * args)
{
  uword size = va_arg (*args, uword);

  if (size >= (1 << 20))
    {
      s = format (s, "(%d mb)", size >> 20);
    }
  else if (size >= (1 << 10))
    {
      s = format (s, "(%d kb)", size >> 10);
    }
  else
    {
      s = format (s, "(%d bytes)", size);
    }
  return (s);
}

u8 *
format_svm_region (u8 * s, va_list * args)
{
  svm_region_t *rp = va_arg (*args, svm_region_t *);
  int verbose = va_arg (*args, int);
  int i;
  uword lo, hi;

  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
              rp->region_name, rp->virtual_base,
              rp->virtual_size, format_svm_size, rp->virtual_size);
  s = format (s, "  user_ctx 0x%x, bitmap_size %d\n",
              rp->user_ctx, rp->bitmap_size);

  if (verbose)
    {
      s = format (s, "  flags: 0x%x %U\n", rp->flags,
                  format_svm_flags, rp->flags);
      s = format (s,
                  "  region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
                  rp->region_heap, rp->data_base, rp->data_heap);
    }

  s = format (s, "  %d clients, pids: ", vec_len (rp->client_pids));

  for (i = 0; i < vec_len (rp->client_pids); i++)
    s = format (s, "%d ", rp->client_pids[i]);

  s = format (s, "\n");

  if (verbose)
    {
      lo = hi = ~0;

      s = format (s, "  VM in use: ");

      for (i = 0; i < rp->bitmap_size; i++)
        {
          if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
            {
              if (lo == ~0)
                {
                  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
                }
              else
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE;
                }
            }
          else
            {
              if (lo != ~0)
                {
                  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
                  s = format (s, "   0x%x - 0x%x (%dk)\n", lo, hi,
                              (hi - lo) >> 10);
                  lo = hi = ~0;
                }
            }
        }
    }

  return (s);
}

/*
 * rnd_pagesize
 * Round to a pagesize multiple, presumably 4k works
 */
static u64
rnd_pagesize (u64 size)
{
  u64 rv;

  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
  return (rv);
}

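/*
 * Example (assuming MMAP_PAGESIZE is 4 << 10): rnd_pagesize (1) and
 * rnd_pagesize (4096) both return 4096, while rnd_pagesize (4097)
 * returns 8192.
 */
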
/*
 * svm_data_region_setup
 */
static int
svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;

  map_size = rp->virtual_size - (MMAP_PAGESIZE +
                                 (a->pvt_heap_size ? a->pvt_heap_size :
                                  SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      struct stat statb;

      fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size -
              (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
      CLIB_MEM_UNPOISON (rp->data_base, map_size);
      rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
      rp->flags |= SVM_FLAGS_FILE;
    }

  if (a->flags & SVM_FLAGS_MHEAP)
    {
      rp->data_heap = create_mspace_with_base (rp->data_base,
                                               map_size, 1 /* locked */ );
      mspace_disable_expand (rp->data_heap);

      rp->flags |= SVM_FLAGS_MHEAP;
    }
  return 0;
}

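/*
 * Region layout sketch (sizes illustrative; the private heap defaults to
 * SVM_PVT_MHEAP_SIZE unless a->pvt_heap_size overrides it):
 *
 *   rp->virtual_base  +---------------------------+
 *                     | svm_region_t header       |  MMAP_PAGESIZE
 *                     +---------------------------+
 *                     | private (region) heap     |  pvt heap size
 *   rp->data_base ->  +---------------------------+
 *                     | data heap / backing file  |  map_size
 *                     +---------------------------+
 */
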
static int
svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;
  struct stat statb;

  map_size = rp->virtual_size -
    (MMAP_PAGESIZE
     + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {

      fd = open (a->backing_file, O_RDWR, 0777);

      if (fd < 0)
        {
          clib_unix_warning ("open");
          return -1;
        }

      if (fstat (fd, &statb) < 0)
        {
          clib_unix_warning ("fstat");
          close (fd);
          return -2;
        }

      if (statb.st_mode & S_IFREG)
        {
          if (statb.st_size == 0)
            {
              if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
                {
                  clib_unix_warning ("seek region size");
                  close (fd);
                  return -3;
                }
              if (write (fd, &junk, 1) != 1)
                {
                  clib_unix_warning ("set region size");
                  close (fd);
                  return -3;
                }
            }
          else
            {
              map_size = rnd_pagesize (statb.st_size);
            }
        }
      else
        {
          map_size = a->backing_mmap_size;
        }

      ASSERT (map_size <= rp->virtual_size
              - (MMAP_PAGESIZE
                 +
                 (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (fd);
          return -3;
        }
      close (fd);
      CLIB_MEM_UNPOISON (rp->data_base, map_size);
    }
  return 0;
}

u8 *
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
{
  u8 *shm_name;
  int root_path_offset = 0;
  int name_offset = 0;

  if (a->root_path)
    {
      /* Tolerate present or absent slashes */
      if (a->root_path[0] == '/')
        root_path_offset++;

      if (a->name[0] == '/')
        name_offset = 1;

      shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
                         &a->name[name_offset], 0);
    }
  else
    shm_name = format (0, "%s%c", a->name, 0);
  return (shm_name);
}

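/*
 * Example (hypothetical values): with a->root_path = "/mytest" and
 * a->name = "/global_vm" the shm segment name becomes "/mytest-global_vm";
 * with no root path it is simply "/global_vm".
 */
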
void
svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
{
  pthread_mutexattr_t attr;
  pthread_condattr_t cattr;
  int nbits, words, bit;
  int overhead_space;
  void *oldheap;
  uword data_base;
  ASSERT (rp);
  int rv;

  clib_memset (rp, 0, sizeof (*rp));

  if (pthread_mutexattr_init (&attr))
    clib_unix_warning ("mutexattr_init");

  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("mutexattr_setpshared");

  if (pthread_mutex_init (&rp->mutex, &attr))
    clib_unix_warning ("mutex_init");

  if (pthread_mutexattr_destroy (&attr))
    clib_unix_warning ("mutexattr_destroy");

  if (pthread_condattr_init (&cattr))
    clib_unix_warning ("condattr_init");

  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("condattr_setpshared");

  if (pthread_cond_init (&rp->condvar, &cattr))
    clib_unix_warning ("cond_init");

  if (pthread_condattr_destroy (&cattr))
    clib_unix_warning ("condattr_destroy");

  region_lock (rp, 1);

  rp->virtual_base = a->baseva;
  rp->virtual_size = a->size;

  rp->region_heap = create_mspace_with_base
    (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
     (a->pvt_heap_size !=
      0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ );

  mspace_disable_expand (rp->region_heap);

  oldheap = svm_push_pvt_heap (rp);

  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
  vec_add1 (rp->client_pids, getpid ());

  nbits = rp->virtual_size / MMAP_PAGESIZE;

  ASSERT (nbits > 0);
  rp->bitmap_size = nbits;
  words = (nbits + BITS (uword) - 1) / BITS (uword);
  vec_validate (rp->bitmap, words - 1);

  overhead_space = MMAP_PAGESIZE /* header */  +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);

  bit = 0;
  data_base = (uword) rp->virtual_base;

  if (a->flags & SVM_FLAGS_NODATA)
    rp->flags |= SVM_FLAGS_NEED_DATA_INIT;

  do
    {
      clib_bitmap_set_no_check (rp->bitmap, bit, 1);
      bit++;
      overhead_space -= MMAP_PAGESIZE;
      data_base += MMAP_PAGESIZE;
    }
  while (overhead_space > 0);

  rp->data_base = (void *) data_base;

  /*
   * Note: although the POSIX spec guarantees that only one
   * process enters this block, we have to play games
   * to hold off clients until e.g. the mutex is ready
   */
  rp->version = SVM_VERSION;

  /* setup the data portion of the region */

  rv = svm_data_region_create (a, rp);
  if (rv)
    {
      clib_warning ("data_region_create: %d", rv);
    }

  region_unlock (rp);

  svm_pop_heap (oldheap);
}

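/*
 * Worked example of the overhead accounting above (values assumed for
 * illustration): with MMAP_PAGESIZE = 4096 and an 8 MB private heap,
 * overhead_space starts at 4096 + 8388608 bytes, so the do/while loop
 * marks the first 2049 bitmap pages as in use and rp->data_base lands
 * 2049 pages above rp->virtual_base.
 */
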
/*
 * svm_map_region
 */
void *
svm_map_region (svm_map_region_args_t * a)
{
  int svm_fd;
  svm_region_t *rp;
  int deadman = 0;
  u8 junk = 0;
  void *oldheap;
  int rv;
  int pid_holding_region_lock;
  u8 *shm_name;
  int dead_region_recovery = 0;
  int time_left;
  struct stat stat;
  struct timespec ts, tsrem;

  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
  ASSERT (a->name);

  shm_name = shm_name_from_svm_map_region_args (a);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] map region %s: shm_open (%s)",
                  getpid (), a->name, shm_name);

  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);

  if (svm_fd >= 0)
    {
      if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
        clib_unix_warning ("segment chmod");
      /* This turns out to fail harmlessly if the client starts first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      vec_free (shm_name);

      if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
        {
          clib_warning ("seek region size");
          close (svm_fd);
          return (0);
        }
      if (write (svm_fd, &junk, 1) != 1)
        {
          clib_warning ("set region size");
          close (svm_fd);
          return (0);
        }

      rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
                 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          clib_unix_warning ("mmap create");
          close (svm_fd);
          return (0);
        }
      close (svm_fd);
      CLIB_MEM_UNPOISON (rp, a->size);

      svm_region_init_mapped_region (a, rp);

      return ((void *) rp);
    }
  else
    {
      svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);

      vec_free (shm_name);

      if (svm_fd < 0)
        {
          perror ("svm_region_map(mmap open)");
          return (0);
        }

      /* Reset ownership in case the client started first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
        clib_unix_warning ("segment chown [ok if client starts first]");

      time_left = 20;
      while (1)
        {
          if (0 != fstat (svm_fd, &stat))
            {
              clib_warning ("fstat failed: %d", errno);
              close (svm_fd);
              return (0);
            }
          if (stat.st_size > 0)
            {
              break;
            }
          if (0 == time_left)
            {
              clib_warning ("waiting for resize of shm file timed out");
              close (svm_fd);
              return (0);
            }
          ts.tv_sec = 0;
          ts.tv_nsec = 100000000;
          while (nanosleep (&ts, &tsrem) < 0)
            ts = tsrem;
          time_left--;
        }

      rp = mmap (0, MMAP_PAGESIZE,
                 PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
        {
          close (svm_fd);
          clib_warning ("mmap");
          return (0);
        }

      CLIB_MEM_UNPOISON (rp, MMAP_PAGESIZE);

      /*
       * We lost the footrace to create this region; make sure
       * the winner has crossed the finish line.
       */
      while (rp->version == 0 && deadman++ < 5)
        {
          sleep (1);
        }

      /*
       * <bleep>-ed?
       */
      if (rp->version == 0)
        {
          clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
          close (svm_fd);
          munmap (rp, a->size);
          return (0);
        }
      /* Remap now that the region has been placed */
      a->baseva = rp->virtual_base;
      a->size = rp->virtual_size;
      munmap (rp, MMAP_PAGESIZE);

      rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
                          PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_FIXED, svm_fd, 0);
      if ((uword) rp == (uword) MAP_FAILED)
        {
          clib_unix_warning ("mmap");
          close (svm_fd);
          return (0);
        }

      close (svm_fd);

      CLIB_MEM_UNPOISON (rp, a->size);

      if ((uword) rp != rp->virtual_base)
        {
          clib_warning ("mmap botch");
        }

      /*
       * Try to fix the region mutex if it is held by
       * a dead process
       */
      pid_holding_region_lock = rp->mutex_owner_pid;
      if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
        {
          clib_warning
            ("region %s mutex held by dead pid %d, tag %d, force unlock",
             rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
          /* owner pid is nonexistent */
          rp->mutex.__data.__owner = 0;
          rp->mutex.__data.__lock = 0;
          dead_region_recovery = 1;
        }

      if (dead_region_recovery)
        clib_warning ("recovery: attempt to re-lock region");

      region_lock (rp, 2);
      oldheap = svm_push_pvt_heap (rp);
      vec_add1 (rp->client_pids, getpid ());

      if (dead_region_recovery)
        clib_warning ("recovery: attempt svm_data_region_map");

      rv = svm_data_region_map (a, rp);
      if (rv)
        {
          clib_warning ("data_region_map: %d", rv);
        }

      if (dead_region_recovery)
        clib_warning ("unlock and continue");

      region_unlock (rp);

      svm_pop_heap (oldheap);

      return ((void *) rp);

    }
  return 0; /* NOTREACHED */	//NOSONAR
}

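/*
 * Creation race in svm_map_region, illustrated with two hypothetical
 * processes A and B: A wins the shm_open (O_CREAT | O_EXCL), sizes the
 * segment and initializes the header via svm_region_init_mapped_region.
 * B's exclusive open fails, so B re-opens without O_EXCL, polls fstat
 * until st_size > 0, maps a single page to learn virtual_base and
 * virtual_size, then remaps the whole region MAP_FIXED at that address
 * and waits for rp->version to become nonzero before touching shared
 * state.
 */
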
static void
svm_mutex_cleanup (void)
{
  int i;
  for (i = 0; i < nheld; i++)
    {
      pthread_mutex_unlock (mutexes_held[i]);	//NOSONAR
    }
}

static int
svm_region_init_internal (svm_map_region_args_t * a)
{
  svm_region_t *rp;
  u64 ticks = clib_cpu_time_now ();
  uword randomize_baseva;

  /* guard against klutz calls */
  if (root_rp)
    return -1;

  root_rp_refcount++;

  atexit (svm_mutex_cleanup);

  /* Randomize the shared-VM base at init time */
  if (MMAP_PAGESIZE <= (4 << 10))
    randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
  else
    randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;

  a->baseva += randomize_baseva;

  rp = svm_map_region (a);
  if (!rp)
    return -1;

  region_lock (rp, 3);

  /* Set up the main region data structures */
  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
    {
      svm_main_region_t *mp = 0;
      void *oldheap;

      rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);

      oldheap = svm_push_pvt_heap (rp);
      vec_validate (mp, 0);
      mp->name_hash = hash_create_string (0, sizeof (uword));
      mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
      mp->uid = a->uid;
      mp->gid = a->gid;
      rp->data_base = mp;
      svm_pop_heap (oldheap);
    }
  region_unlock (rp);
  root_rp = rp;

  return 0;
}

void
svm_region_init (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = 0;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  svm_region_init_internal (a);
}

int
svm_region_init_chroot (const char *root_path)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = 0;
  a->gid = 0;

  return svm_region_init_internal (a);
}

void
svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = uid;
  a->gid = gid;

  svm_region_init_internal (a);
}

void
svm_region_init_args (svm_map_region_args_t * a)
{
  svm_region_init_internal (a);
}

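/*
 * Typical client usage, shown as a hedged sketch (the root path, region
 * name and size are hypothetical; error handling omitted):
 *
 *   svm_map_region_args_t _a, *a = &_a;
 *   svm_region_t *rp;
 *
 *   svm_region_init_chroot ("/my-root-path");
 *   clib_memset (a, 0, sizeof (*a));
 *   a->name = "/my-subregion";
 *   a->size = 2 << 20;
 *   a->flags = SVM_FLAGS_MHEAP;
 *   rp = svm_region_find_or_create (a);
 *   ... allocate from rp->data_heap, or use rp->data_base directly ...
 *   svm_region_unmap (rp);
 *   svm_region_exit ();
 */
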
void *
svm_region_find_or_create (svm_map_region_args_t * a)
{
  svm_main_region_t *mp;
  svm_region_t *rp;
  uword need_nbits;
  int index, i;
  void *oldheap;
  uword *p;
  u8 *name;
  svm_subregion_t *subp;

  ASSERT (root_rp);

  a->size += MMAP_PAGESIZE +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
  a->size = rnd_pagesize (a->size);

  region_lock (root_rp, 4);
  oldheap = svm_push_pvt_heap (root_rp);
  mp = root_rp->data_base;

  ASSERT (mp);

  /* Map the named region from the correct chroot environment */
  if (a->root_path == NULL)
    a->root_path = (char *) mp->root_path;

  /*
   * See if this region is already known. If it is, we're
   * almost done...
   */
  p = hash_get_mem (mp->name_hash, a->name);

  if (p)
    {
      rp = svm_map_region (a);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return rp;
    }

  /* Create the region. */
  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  need_nbits = a->size / MMAP_PAGESIZE;

  index = 1;			/* $$$ fixme, figure out how many bits to really skip */

  /*
   * Scan the virtual space allocation bitmap, looking for a large
   * enough chunk
   */
  do
    {
      if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
        {
          for (i = 0; i < (need_nbits - 1); i++)
            {
              if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
                {
                  index = index + i;
                  goto next;
                }
            }
          break;
        }
      index++;
    next:;
    }
  while (index < root_rp->bitmap_size);

  /* Completely out of VM? */
  if (index >= root_rp->bitmap_size)
    {
      clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
                    root_rp->region_name, a->size, a->size);
      svm_pop_heap (oldheap);
      region_unlock (root_rp);
      return 0;
    }

  /*
   * Mark virtual space allocated
   */
#if CLIB_DEBUG > 1
  clib_warning ("set %d bits at index %d", need_nbits, index);
#endif

  for (i = 0; i < need_nbits; i++)
    {
      clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
    }

  /* Place this region where it goes... */
  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;

  rp = svm_map_region (a);

  pool_get (mp->subregions, subp);
  name = format (0, "%s%c", a->name, 0);
  subp->subregion_name = name;

  hash_set_mem (mp->name_hash, name, subp - mp->subregions);

  svm_pop_heap (oldheap);

  region_unlock (root_rp);

  return (rp);
}

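/*
 * Illustrative first-fit scan in svm_region_find_or_create (values
 * assumed): if pages 1-3 of root_rp->bitmap are already set and
 * need_nbits == 2, the loop walks past the set bits and settles on
 * index 4, so the new subregion is placed at
 * root_rp->virtual_base + 4 * MMAP_PAGESIZE.
 */
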
void
svm_region_unlink (svm_region_t * rp)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_main_region_t *mp;
  u8 *shm_name;

  ASSERT (root_rp);
  ASSERT (rp);
  ASSERT (vec_c_string_is_terminated (rp->region_name));

  mp = root_rp->data_base;
  ASSERT (mp);

  a->root_path = (char *) mp->root_path;
  a->name = rp->region_name;
  shm_name = shm_name_from_svm_map_region_args (a);
  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
  shm_unlink ((const char *) shm_name);
  vec_free (shm_name);
}

/*
 * svm_region_unmap
 *
 * Let go of the indicated region. If the calling process
 * is the last customer, throw it away completely.
 * The root region mutex guarantees atomicity with respect to
 * a new region client showing up at the wrong moment.
 */
void
svm_region_unmap_internal (void *rp_arg, u8 is_client)
{
  int i, mypid = getpid ();
  int nclients_left;
  void *oldheap;
  uword virtual_base, virtual_size;
  svm_region_t *rp = rp_arg;
  char *name;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  ASSERT (rp);
  ASSERT (root_rp);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);

  region_lock (root_rp, 5);
  region_lock (rp, 6);

  oldheap = svm_push_pvt_heap (rp);	/* nb vec_delete() in the loop */

  /* Remove the caller from the list of mappers */
  CLIB_MEM_UNPOISON (rp->client_pids, vec_bytes (rp->client_pids));
  for (i = 0; i < vec_len (rp->client_pids); i++)
    {
      if (rp->client_pids[i] == mypid)
        {
          vec_delete (rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  svm_pop_heap (oldheap);

  nclients_left = vec_len (rp->client_pids);
  virtual_base = rp->virtual_base;
  virtual_size = rp->virtual_size;

  if (nclients_left == 0)
    {
      int index, nbits, i;
      svm_main_region_t *mp;
      uword *p;
      svm_subregion_t *subp;

      /* Kill the region, last guy on his way out */

      oldheap = svm_push_pvt_heap (root_rp);
      name = vec_dup (rp->region_name);

      virtual_base = rp->virtual_base;
      virtual_size = rp->virtual_size;

      /* Figure out which bits to clear in the root region bitmap */
      index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;

      nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;

#if CLIB_DEBUG > 1
      clib_warning ("clear %d bits at index %d", nbits, index);
#endif
      /* Give back the allocated VM */
      for (i = 0; i < nbits; i++)
        {
          clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
        }

      mp = root_rp->data_base;

      p = hash_get_mem (mp->name_hash, name);

      /* Better never happen ... */
      if (p == NULL)
        {
          region_unlock (rp);
          region_unlock (root_rp);
          svm_pop_heap (oldheap);
          clib_warning ("Region name '%s' not found?", name);
          return;
        }

      /* Remove from the root region subregion pool */
      subp = mp->subregions + p[0];
      pool_put (mp->subregions, subp);

      hash_unset_mem (mp->name_hash, name);

      vec_free (name);

      region_unlock (rp);

      /* If a client asks for the cleanup, don't unlink the backing
       * file since we can't tell if it has been recreated. */
      if (!is_client)
        svm_region_unlink (rp);

      munmap ((void *) virtual_base, virtual_size);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return;
    }

  region_unlock (rp);
  region_unlock (root_rp);

  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_unmap (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
}

void
svm_region_unmap_client (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
}

/*
 * svm_region_exit
 */
static void
svm_region_exit_internal (u8 is_client)
{
  void *oldheap;
  int i, mypid = getpid ();
  uword virtual_base, virtual_size;

  /* It felt so nice we did it twice... */
  if (root_rp == 0)
    return;

  if (--root_rp_refcount > 0)
    return;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  region_lock (root_rp, 7);
  oldheap = svm_push_pvt_heap (root_rp);

  virtual_base = root_rp->virtual_base;
  virtual_size = root_rp->virtual_size;

  CLIB_MEM_UNPOISON (root_rp->client_pids, vec_bytes (root_rp->client_pids));
  for (i = 0; i < vec_len (root_rp->client_pids); i++)
    {
      if (root_rp->client_pids[i] == mypid)
        {
          vec_delete (root_rp->client_pids, 1, i);
          goto found;
        }
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  if (!is_client && vec_len (root_rp->client_pids) == 0)
    svm_region_unlink (root_rp);

  region_unlock (root_rp);
  svm_pop_heap (oldheap);

  root_rp = 0;
  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_exit (void)
{
  svm_region_exit_internal (0 /* is_client */ );
}

void
svm_region_exit_client (void)
{
  svm_region_exit_internal (1 /* is_client */ );
}

void
svm_client_scan_this_region_nolock (svm_region_t * rp)
{
  int j;
  int mypid = getpid ();
  void *oldheap;

  for (j = 0; j < vec_len (rp->client_pids); j++)
    {
      if (mypid == rp->client_pids[j])
        continue;
      if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        rp->region_name, rp->client_pids[j]);
          /* nb: client vec in rp->region_heap */
          oldheap = svm_push_pvt_heap (rp);
          vec_delete (rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }
}


/*
 * Scan svm regions for dead clients
 */
void
svm_client_scan (const char *root_path)
{
  int i, j;
  svm_main_region_t *mp;
  svm_map_region_args_t *a = 0;
  svm_region_t *root_rp;
  svm_region_t *rp;
  svm_subregion_t *subp;
  u8 *name = 0;
  u8 **svm_names = 0;
  void *oldheap;
  int mypid = getpid ();

  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  root_rp = svm_get_root_rp ();

  pthread_mutex_lock (&root_rp->mutex);

  mp = root_rp->data_base;

  for (j = 0; j < vec_len (root_rp->client_pids); j++)
    {
      if (mypid == root_rp->client_pids[j])
        continue;
      if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        root_rp->region_name, root_rp->client_pids[j]);
          /* nb: client vec in root_rp->region_heap */
          oldheap = svm_push_pvt_heap (root_rp);
          vec_delete (root_rp->client_pids, 1, j);
          j--;
          svm_pop_heap (oldheap);
        }
    }

  /*
   * Snapshot names; we can't hold the root rp mutex across
   * find_or_create.
   */
  /* *INDENT-OFF* */
  pool_foreach (subp, mp->subregions, ({
    name = vec_dup (subp->subregion_name);
    vec_add1(svm_names, name);
  }));
  /* *INDENT-ON* */

  pthread_mutex_unlock (&root_rp->mutex);

  for (i = 0; i < vec_len (svm_names); i++)
    {
      vec_validate (a, 0);
      a->root_path = root_path;
      a->name = (char *) svm_names[i];
      rp = svm_region_find_or_create (a);
      if (rp)
        {
          pthread_mutex_lock (&rp->mutex);

          svm_client_scan_this_region_nolock (rp);

          pthread_mutex_unlock (&rp->mutex);
          svm_region_unmap (rp);
          vec_free (svm_names[i]);
        }
      vec_free (a);
    }
  vec_free (svm_names);

  svm_region_exit ();

  vec_free (a);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */