1/*
2 This is a version (aka dlmalloc) of malloc/free/realloc written by
3 Doug Lea and released to the public domain, as explained at
4 http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
5 comments, complaints, performance data, etc to dl@cs.oswego.edu
6*/
7
8#include <vppinfra/dlmalloc.h>
9
10/*------------------------------ internal #includes ---------------------- */
11
12#ifdef _MSC_VER
13#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
14#endif /* _MSC_VER */
15#if !NO_MALLOC_STATS
16#include <stdio.h> /* for printing in malloc_stats */
17#endif /* NO_MALLOC_STATS */
18#ifndef LACKS_ERRNO_H
19#include <errno.h> /* for MALLOC_FAILURE_ACTION */
20#endif /* LACKS_ERRNO_H */
21#ifdef DEBUG
22#if DLM_ABORT_ON_ASSERT_FAILURE
23#undef assert
24#define assert(x) if(!(x)) DLM_ABORT
25#else /* DLM_ABORT_ON_ASSERT_FAILURE */
26#include <assert.h>
27#endif /* DLM_ABORT_ON_ASSERT_FAILURE */
28#else /* DEBUG */
29#ifndef assert
30#define assert(x)
31#endif
32#define DEBUG 0
33#endif /* DEBUG */
34#if !defined(WIN32) && !defined(LACKS_TIME_H)
35#include <time.h> /* for magic initialization */
36#endif /* WIN32 */
37#ifndef LACKS_STDLIB_H
38#include <stdlib.h> /* for abort() */
39#endif /* LACKS_STDLIB_H */
40#ifndef LACKS_STRING_H
41#include <string.h> /* for memset etc */
42#endif /* LACKS_STRING_H */
43#if USE_BUILTIN_FFS
44#ifndef LACKS_STRINGS_H
45#include <strings.h> /* for ffs */
46#endif /* LACKS_STRINGS_H */
47#endif /* USE_BUILTIN_FFS */
48#if HAVE_MMAP
49#ifndef LACKS_SYS_MMAN_H
50/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
51#if (defined(linux) && !defined(__USE_GNU))
52#define __USE_GNU 1
53#include <sys/mman.h> /* for mmap */
54#undef __USE_GNU
55#else
56#include <sys/mman.h> /* for mmap */
57#endif /* linux */
58#endif /* LACKS_SYS_MMAN_H */
59#ifndef LACKS_FCNTL_H
60#include <fcntl.h>
61#endif /* LACKS_FCNTL_H */
62#endif /* HAVE_MMAP */
63#ifndef LACKS_UNISTD_H
64#include <unistd.h> /* for sbrk, sysconf */
65#else /* LACKS_UNISTD_H */
66#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
67extern void* sbrk(ptrdiff_t);
68#endif /* FreeBSD etc */
69#endif /* LACKS_UNISTD_H */
70
71/* Declarations for locking */
72#if USE_LOCKS
73#ifndef WIN32
74#if defined (__SVR4) && defined (__sun) /* solaris */
75#include <thread.h>
76#elif !defined(LACKS_SCHED_H)
77#include <sched.h>
78#endif /* solaris or LACKS_SCHED_H */
79#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
80#include <pthread.h>
81#endif /* USE_RECURSIVE_LOCKS ... */
82#elif defined(_MSC_VER)
83#ifndef _M_AMD64
84/* These are already defined on AMD64 builds */
85#ifdef __cplusplus
86extern "C" {
87#endif /* __cplusplus */
88LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
89LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
90#ifdef __cplusplus
91}
92#endif /* __cplusplus */
93#endif /* _M_AMD64 */
94#pragma intrinsic (_InterlockedCompareExchange)
95#pragma intrinsic (_InterlockedExchange)
96#define interlockedcompareexchange _InterlockedCompareExchange
97#define interlockedexchange _InterlockedExchange
98#elif defined(WIN32) && defined(__GNUC__)
99#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
100#define interlockedexchange __sync_lock_test_and_set
101#endif /* Win32 */
102#else /* USE_LOCKS */
103#endif /* USE_LOCKS */
104
105#ifndef LOCK_AT_FORK
106#define LOCK_AT_FORK 0
107#endif
108
109/* Declarations for bit scanning on win32 */
110#if defined(_MSC_VER) && _MSC_VER>=1300
111#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
112#ifdef __cplusplus
113extern "C" {
114#endif /* __cplusplus */
115unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
116unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
117#ifdef __cplusplus
118}
119#endif /* __cplusplus */
120
121#define BitScanForward _BitScanForward
122#define BitScanReverse _BitScanReverse
123#pragma intrinsic(_BitScanForward)
124#pragma intrinsic(_BitScanReverse)
125#endif /* BitScanForward */
126#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
127
128#ifndef WIN32
129#ifndef malloc_getpagesize
130# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
131# ifndef _SC_PAGE_SIZE
132# define _SC_PAGE_SIZE _SC_PAGESIZE
133# endif
134# endif
135# ifdef _SC_PAGE_SIZE
136# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
137# else
138# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
139 extern size_t getpagesize();
140# define malloc_getpagesize getpagesize()
141# else
142# ifdef WIN32 /* use supplied emulation of getpagesize */
143# define malloc_getpagesize getpagesize()
144# else
145# ifndef LACKS_SYS_PARAM_H
146# include <sys/param.h>
147# endif
148# ifdef EXEC_PAGESIZE
149# define malloc_getpagesize EXEC_PAGESIZE
150# else
151# ifdef NBPG
152# ifndef CLSIZE
153# define malloc_getpagesize NBPG
154# else
155# define malloc_getpagesize (NBPG * CLSIZE)
156# endif
157# else
158# ifdef NBPC
159# define malloc_getpagesize NBPC
160# else
161# ifdef PAGESIZE
162# define malloc_getpagesize PAGESIZE
163# else /* just guess */
164# define malloc_getpagesize ((size_t)4096U)
165# endif
166# endif
167# endif
168# endif
169# endif
170# endif
171# endif
172#endif
173#endif
174
175/* ------------------- size_t and alignment properties -------------------- */
176
177/* The byte and bit size of a size_t */
178#define SIZE_T_SIZE (sizeof(size_t))
179#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
180
181/* Some constants coerced to size_t */
182/* Annoying but necessary to avoid errors on some platforms */
183#define SIZE_T_ZERO ((size_t)0)
184#define SIZE_T_ONE ((size_t)1)
185#define SIZE_T_TWO ((size_t)2)
186#define SIZE_T_FOUR ((size_t)4)
187#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
188#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
189#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
190#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
191
192/* The bit mask value corresponding to MALLOC_ALIGNMENT */
193#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
194
195/* True if address a has acceptable alignment */
196#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
197
198/* the number of bytes to offset an address to align it */
199#define align_offset(A)\
200 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
201 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
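/*
  Worked example, assuming a MALLOC_ALIGNMENT of 16 (the usual default on
  64-bit targets):

    align_offset((void*)0x1000)  is 0   (already aligned)
    align_offset((void*)0x1008)  is 8   (pad up to 0x1010)
    align_offset((void*)0x100f)  is 1   (pad up to 0x1010)
*/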
202
203/* -------------------------- MMAP preliminaries ------------------------- */
204
205/*
206 If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
207 checks to fail so the compiler optimizer can delete code rather than
208 using so many "#if"s.
209*/
210
211
212/* MORECORE and MMAP must return MFAIL on failure */
213#define MFAIL ((void*)(MAX_SIZE_T))
214#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
215
216#if HAVE_MMAP
217
218#ifndef WIN32
219#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
220#define MMAP_PROT (PROT_READ|PROT_WRITE)
221#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
222#define MAP_ANONYMOUS MAP_ANON
223#endif /* MAP_ANON */
224#ifdef MAP_ANONYMOUS
225#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
226#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
227#else /* MAP_ANONYMOUS */
228/*
229 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
230 is unlikely to be needed, but is supplied just in case.
231*/
232#define MMAP_FLAGS (MAP_PRIVATE)
233static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
234#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
235 (dev_zero_fd = open("/dev/zero", O_RDWR), \
236 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
237 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
238#endif /* MAP_ANONYMOUS */
239
240#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
241
242#else /* WIN32 */
243
244/* Win32 MMAP via VirtualAlloc */
245static FORCEINLINE void* win32mmap(size_t size) {
246 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
247 return (ptr != 0)? ptr: MFAIL;
248}
249
250/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
251static FORCEINLINE void* win32direct_mmap(size_t size) {
252 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
253 PAGE_READWRITE);
254 return (ptr != 0)? ptr: MFAIL;
255}
256
257/* This function supports releasing coalesced segments */
258static FORCEINLINE int win32munmap(void* ptr, size_t size) {
259 MEMORY_BASIC_INFORMATION minfo;
260 char* cptr = (char*)ptr;
261 while (size) {
262 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
263 return -1;
264 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
265 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
266 return -1;
267 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
268 return -1;
269 cptr += minfo.RegionSize;
270 size -= minfo.RegionSize;
271 }
272 return 0;
273}
274
275#define MMAP_DEFAULT(s) win32mmap(s)
276#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
277#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
278#endif /* WIN32 */
279#endif /* HAVE_MMAP */
280
281#if HAVE_MREMAP
282#ifndef WIN32
283#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
284#endif /* WIN32 */
285#endif /* HAVE_MREMAP */
286
287/**
288 * Define CALL_MORECORE
289 */
290#if HAVE_MORECORE
291 #ifdef MORECORE
292 #define CALL_MORECORE(S) MORECORE(S)
293 #else /* MORECORE */
294 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
295 #endif /* MORECORE */
296#else /* HAVE_MORECORE */
297 #define CALL_MORECORE(S) MFAIL
298#endif /* HAVE_MORECORE */
299
300/**
301 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
302 */
303#if HAVE_MMAP
304 #define USE_MMAP_BIT (SIZE_T_ONE)
305
306 #ifdef MMAP
307 #define CALL_MMAP(s) MMAP(s)
308 #else /* MMAP */
309 #define CALL_MMAP(s) MMAP_DEFAULT(s)
310 #endif /* MMAP */
311 #ifdef MUNMAP
312 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
313 #else /* MUNMAP */
314 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
315 #endif /* MUNMAP */
316 #ifdef DIRECT_MMAP
317 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
318 #else /* DIRECT_MMAP */
319 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
320 #endif /* DIRECT_MMAP */
321#else /* HAVE_MMAP */
322 #define USE_MMAP_BIT (SIZE_T_ZERO)
323
324 #define MMAP(s) MFAIL
325 #define MUNMAP(a, s) (-1)
326 #define DIRECT_MMAP(s) MFAIL
327 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
328 #define CALL_MMAP(s) MMAP(s)
329 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
330#endif /* HAVE_MMAP */
331
332/**
333 * Define CALL_MREMAP
334 */
335#if HAVE_MMAP && HAVE_MREMAP
336 #ifdef MREMAP
337 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
338 #else /* MREMAP */
339 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
340 #endif /* MREMAP */
341#else /* HAVE_MMAP && HAVE_MREMAP */
342 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
343#endif /* HAVE_MMAP && HAVE_MREMAP */
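/*
  As the #ifdef chains above show, a client build can route all system
  memory requests through its own primitives simply by defining MMAP,
  MUNMAP and DIRECT_MMAP (typically via the build system or the
  configuration header included at the top of this file) before this
  translation unit is compiled; CALL_MMAP and friends then expand to the
  user versions instead of the *_DEFAULT ones.  A sketch, with
  hypothetical helper names:

    #define MMAP(s)         my_reserve_pages(s)
    #define MUNMAP(a, s)    my_release_pages((a), (s))
    #define DIRECT_MMAP(s)  my_reserve_pages(s)

  The replacements must follow the same contract as the defaults: the
  mmap-like calls return MFAIL on failure, and MUNMAP returns 0 on
  success and -1 on failure.
*/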
344
345/* mstate bit set if contiguous morecore disabled or failed */
346#define USE_NONCONTIGUOUS_BIT (4U)
347
348/* mstate bit set if no expansion allowed */
349#define USE_NOEXPAND_BIT (8U)
350
351/* trace allocations if set */
352#define USE_TRACE_BIT (16U)
353
354/* segment bit set in create_mspace_with_base */
355#define EXTERN_BIT (8U)
356
357
358/* --------------------------- Lock preliminaries ------------------------ */
359
360/*
361 When locks are defined, there is one global lock, plus
362 one per-mspace lock.
363
364 The global lock ensures that mparams.magic and other unique
365 mparams values are initialized only once. It also protects
366 sequences of calls to MORECORE. In many cases sys_alloc requires
367 two calls, which should not be interleaved with calls by other
368 threads. This does not protect against direct calls to MORECORE
369 by other threads not using this lock, so there is still code to
370 cope as best we can with interference.
371
372 Per-mspace locks surround calls to malloc, free, etc.
373 By default, locks are simple non-reentrant mutexes.
374
375 Because lock-protected regions generally have bounded times, it is
376 OK to use the supplied simple spinlocks. Spinlocks are likely to
377 improve performance for lightly contended applications, but worsen
378 performance under heavy contention.
379
380 If USE_LOCKS is > 1, the definitions of lock routines here are
381 bypassed, in which case you will need to define the type MLOCK_T,
382 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
383 and TRY_LOCK. You must also declare a
384 static MLOCK_T malloc_global_mutex = { initialization values };.
385
386*/
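/*
  For illustration, a user-supplied lock layer (USE_LOCKS > 1) could map
  these hooks onto plain pthread mutexes, matching the conventions of the
  built-in implementations below (ACQUIRE_LOCK returns 0 on success,
  TRY_LOCK returns nonzero on success).  A sketch, assuming <pthread.h>
  is available:

    #define MLOCK_T            pthread_mutex_t
    #define INITIAL_LOCK(lk)   pthread_mutex_init(lk, NULL)
    #define DESTROY_LOCK(lk)   pthread_mutex_destroy(lk)
    #define ACQUIRE_LOCK(lk)   pthread_mutex_lock(lk)
    #define RELEASE_LOCK(lk)   pthread_mutex_unlock(lk)
    #define TRY_LOCK(lk)       (!pthread_mutex_trylock(lk))
    static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

  This is essentially what the pthreads-based branch further down does,
  minus its optional recursive-mutex setup.
*/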
387
388#if !USE_LOCKS
389#define USE_LOCK_BIT (0U)
390#define INITIAL_LOCK(l) (0)
391#define DESTROY_LOCK(l) (0)
392#define ACQUIRE_MALLOC_GLOBAL_LOCK()
393#define RELEASE_MALLOC_GLOBAL_LOCK()
394
395#else
396#if USE_LOCKS > 1
397/* ----------------------- User-defined locks ------------------------ */
398/* Define your own lock implementation here */
399/* #define INITIAL_LOCK(lk) ... */
400/* #define DESTROY_LOCK(lk) ... */
401/* #define ACQUIRE_LOCK(lk) ... */
402/* #define RELEASE_LOCK(lk) ... */
403/* #define TRY_LOCK(lk) ... */
404/* static MLOCK_T malloc_global_mutex = ... */
405
406#elif USE_SPIN_LOCKS
407
408/* First, define CAS_LOCK and CLEAR_LOCK on ints */
409/* Note CAS_LOCK defined to return 0 on success */
410
411#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
412#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
413#define CLEAR_LOCK(sl) __sync_lock_release(sl)
414
415#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
416/* Custom spin locks for older gcc on x86 */
417static FORCEINLINE int x86_cas_lock(int *sl) {
418 int ret;
419 int val = 1;
420 int cmp = 0;
421 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
422 : "=a" (ret)
423 : "r" (val), "m" (*(sl)), "0"(cmp)
424 : "memory", "cc");
425 return ret;
426}
427
428static FORCEINLINE void x86_clear_lock(int* sl) {
429 assert(*sl != 0);
430 int prev = 0;
431 int ret;
432 __asm__ __volatile__ ("lock; xchgl %0, %1"
433 : "=r" (ret)
434 : "m" (*(sl)), "0"(prev)
435 : "memory");
436}
437
438#define CAS_LOCK(sl) x86_cas_lock(sl)
439#define CLEAR_LOCK(sl) x86_clear_lock(sl)
440
441#else /* Win32 MSC */
442#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
443#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
444
445#endif /* ... gcc spin locks ... */
446
447/* How to yield for a spin lock */
448#define SPINS_PER_YIELD 63
449#if defined(_MSC_VER)
450#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
451#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
452#elif defined (__SVR4) && defined (__sun) /* solaris */
453#define SPIN_LOCK_YIELD thr_yield();
454#elif !defined(LACKS_SCHED_H)
455#define SPIN_LOCK_YIELD sched_yield();
456#else
457#define SPIN_LOCK_YIELD
458#endif /* ... yield ... */
459
460#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
461/* Plain spin locks use single word (embedded in malloc_states) */
462__clib_nosanitize_addr
463static int spin_acquire_lock(int *sl) {
464 int spins = 0;
465 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
466 if ((++spins & SPINS_PER_YIELD) == 0) {
467 SPIN_LOCK_YIELD;
468 }
469 }
470 return 0;
471}
472
473#define MLOCK_T int
474#define TRY_LOCK(sl) !CAS_LOCK(sl)
475#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
476#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
477#define INITIAL_LOCK(sl) (*sl = 0)
478#define DESTROY_LOCK(sl) (0)
479static MLOCK_T malloc_global_mutex = 0;
480
481#else /* USE_RECURSIVE_LOCKS */
482/* types for lock owners */
483#ifdef WIN32
484#define THREAD_ID_T DWORD
485#define CURRENT_THREAD GetCurrentThreadId()
486#define EQ_OWNER(X,Y) ((X) == (Y))
487#else
488/*
489 Note: the following assume that pthread_t is a type that can be
490 initialized to (cast) zero. If this is not the case, you will need to
491 somehow redefine these or not use spin locks.
492*/
493#define THREAD_ID_T pthread_t
494#define CURRENT_THREAD pthread_self()
495#define EQ_OWNER(X,Y) pthread_equal(X, Y)
496#endif
497
498struct malloc_recursive_lock {
499 int sl;
500 unsigned int c;
501 THREAD_ID_T threadid;
502};
503
504#define MLOCK_T struct malloc_recursive_lock
505static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
506
507static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
508 assert(lk->sl != 0);
509 if (--lk->c == 0) {
510 CLEAR_LOCK(&lk->sl);
511 }
512}
513
514static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
515 THREAD_ID_T mythreadid = CURRENT_THREAD;
516 int spins = 0;
517 for (;;) {
518 if (*((volatile int *)(&lk->sl)) == 0) {
519 if (!CAS_LOCK(&lk->sl)) {
520 lk->threadid = mythreadid;
521 lk->c = 1;
522 return 0;
523 }
524 }
525 else if (EQ_OWNER(lk->threadid, mythreadid)) {
526 ++lk->c;
527 return 0;
528 }
529 if ((++spins & SPINS_PER_YIELD) == 0) {
530 SPIN_LOCK_YIELD;
531 }
532 }
533}
534
535static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
536 THREAD_ID_T mythreadid = CURRENT_THREAD;
537 if (*((volatile int *)(&lk->sl)) == 0) {
538 if (!CAS_LOCK(&lk->sl)) {
539 lk->threadid = mythreadid;
540 lk->c = 1;
541 return 1;
542 }
543 }
544 else if (EQ_OWNER(lk->threadid, mythreadid)) {
545 ++lk->c;
546 return 1;
547 }
548 return 0;
549}
550
551#define RELEASE_LOCK(lk) recursive_release_lock(lk)
552#define TRY_LOCK(lk) recursive_try_lock(lk)
553#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
554#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
555#define DESTROY_LOCK(lk) (0)
556#endif /* USE_RECURSIVE_LOCKS */
557
558#elif defined(WIN32) /* Win32 critical sections */
559#define MLOCK_T CRITICAL_SECTION
560#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
561#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
562#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
563#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
564#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
565#define NEED_GLOBAL_LOCK_INIT
566
567static MLOCK_T malloc_global_mutex;
568static volatile LONG malloc_global_mutex_status;
569
570/* Use spin loop to initialize global lock */
571static void init_malloc_global_mutex() {
572 for (;;) {
573 long stat = malloc_global_mutex_status;
574 if (stat > 0)
575 return;
576 /* transition to < 0 while initializing, then to > 0 */
577 if (stat == 0 &&
578 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
579 InitializeCriticalSection(&malloc_global_mutex);
580 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
581 return;
582 }
583 SleepEx(0, FALSE);
584 }
585}
586
587#else /* pthreads-based locks */
588#define MLOCK_T pthread_mutex_t
589#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
590#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
591#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
592#define INITIAL_LOCK(lk) pthread_init_lock(lk)
593#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
594
595#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
596/* Cope with old-style linux recursive lock initialization by adding */
597/* skipped internal declaration from pthread.h */
598extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
599 int __kind));
600#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
601#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
602#endif /* USE_RECURSIVE_LOCKS ... */
603
604static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
605
606static int pthread_init_lock (MLOCK_T *lk) {
607 pthread_mutexattr_t attr;
608 if (pthread_mutexattr_init(&attr)) return 1;
609#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
610 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
611#endif
612 if (pthread_mutex_init(lk, &attr)) return 1;
613 if (pthread_mutexattr_destroy(&attr)) return 1;
614 return 0;
615}
616
617#endif /* ... lock types ... */
618
619/* Common code for all lock types */
620#define USE_LOCK_BIT (2U)
621
622#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
623#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
624#endif
625
626#ifndef RELEASE_MALLOC_GLOBAL_LOCK
627#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
628#endif
629
630#endif /* USE_LOCKS */
631
632/* ----------------------- Chunk representations ------------------------ */
633
634/*
635 (The following includes lightly edited explanations by Colin Plumb.)
636
637 The malloc_chunk declaration below is misleading (but accurate and
638 necessary). It declares a "view" into memory allowing access to
639 necessary fields at known offsets from a given base.
640
641 Chunks of memory are maintained using a `boundary tag' method as
642 originally described by Knuth. (See the paper by Paul Wilson
643 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
644 techniques.) Sizes of free chunks are stored both in the front of
645 each chunk and at the end. This makes consolidating fragmented
646 chunks into bigger chunks fast. The head fields also hold bits
647 representing whether chunks are free or in use.
648
649 Here are some pictures to make it clearer. They are "exploded" to
650 show that the state of a chunk can be thought of as extending from
651 the high 31 bits of the head field of its header through the
652 prev_foot and PINUSE_BIT bit of the following chunk header.
653
654 A chunk that's in use looks like:
655
656 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
657 | Size of previous chunk (if P = 0) |
658 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
659 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
660 | Size of this chunk 1| +-+
661 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
662 | |
663 +- -+
664 | |
665 +- -+
666 | :
667 +- size - sizeof(size_t) available payload bytes -+
668 : |
669 chunk-> +- -+
670 | |
671 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
672 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
673 | Size of next chunk (may or may not be in use) | +-+
674 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
675
676 And if it's free, it looks like this:
677
678 chunk-> +- -+
679 | User payload (must be in use, or we would have merged!) |
680 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
681 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
682 | Size of this chunk 0| +-+
683 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
684 | Next pointer |
685 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
686 | Prev pointer |
687 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
688 | :
689 +- size - sizeof(struct chunk) unused bytes -+
690 : |
691 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
692 | Size of this chunk |
693 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
694 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
695 | Size of next chunk (must be in use, or we would have merged)| +-+
696 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
697 | :
698 +- User payload -+
699 : |
700 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
701 |0|
702 +-+
703 Note that since we always merge adjacent free chunks, the chunks
704 adjacent to a free chunk must be in use.
705
706 Given a pointer to a chunk (which can be derived trivially from the
707 payload pointer) we can, in O(1) time, find out whether the adjacent
708 chunks are free, and if so, unlink them from the lists that they
709 are on and merge them with the current chunk.
710
711 Chunks always begin on even word boundaries, so the mem portion
712 (which is returned to the user) is also on an even word boundary, and
713 thus at least double-word aligned.
714
715 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
716 chunk size (which is always a multiple of two words), is an in-use
717 bit for the *previous* chunk. If that bit is *clear*, then the
718 word before the current chunk size contains the previous chunk
719 size, and can be used to find the front of the previous chunk.
720 The very first chunk allocated always has this bit set, preventing
721 access to non-existent (or non-owned) memory. If pinuse is set for
722 any given chunk, then you CANNOT determine the size of the
723 previous chunk, and might even get a memory addressing fault when
724 trying to do so.
725
726 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
727 the chunk size redundantly records whether the current chunk is
728 inuse (unless the chunk is mmapped). This redundancy enables usage
729 checks within free and realloc, and reduces indirection when freeing
730 and consolidating chunks.
731
732 Each freshly allocated chunk must have both cinuse and pinuse set.
733 That is, each allocated chunk borders either a previously allocated
734 and still in-use chunk, or the base of its memory arena. This is
735 ensured by making all allocations from the `lowest' part of any
736 found chunk. Further, no free chunk physically borders another one,
737 so each free chunk is known to be preceded and followed by either
738 inuse chunks or the ends of memory.
739
740 Note that the `foot' of the current chunk is actually represented
741 as the prev_foot of the NEXT chunk. This makes it easier to
742 deal with alignments etc but can be very confusing when trying
743 to extend or adapt this code.
744
745 The exceptions to all this are
746
747 1. The special chunk `top' is the top-most available chunk (i.e.,
748 the one bordering the end of available memory). It is treated
749 specially. Top is never included in any bin, is used only if
750 no other chunk is available, and is released back to the
751 system if it is very large (see M_TRIM_THRESHOLD). In effect,
752 the top chunk is treated as larger (and thus less well
753 fitting) than any other available chunk. The top chunk
754 doesn't update its trailing size field since there is no next
755 contiguous chunk that would have to index off it. However,
756 space is still allocated for it (TOP_FOOT_SIZE) to enable
757 separation or merging when space is extended.
758
759 2. Chunks allocated via mmap have both cinuse and pinuse bits
760 cleared in their head fields. Because they are allocated
761 one-by-one, each must carry its own prev_foot field, which is
762 also used to hold the offset this chunk has within its mmapped
763 region, which is needed to preserve alignment. Each mmapped
764 chunk is trailed by the first two fields of a fake next-chunk
765 for sake of usage checks.
766
767*/
768
769struct malloc_chunk {
770 size_t prev_foot; /* Size of previous chunk (if free). */
771 size_t head; /* Size and inuse bits. */
772 struct malloc_chunk* fd; /* double links -- used only if free. */
773 struct malloc_chunk* bk;
774};
775
776typedef struct malloc_chunk mchunk;
777typedef struct malloc_chunk* mchunkptr;
778typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
779typedef unsigned int bindex_t; /* Described below */
780typedef unsigned int binmap_t; /* Described below */
781typedef unsigned int flag_t; /* The type of various bit flag sets */
782
783/* ------------------- Chunks sizes and alignments ----------------------- */
784
785#define MCHUNK_SIZE (sizeof(mchunk))
786
787#if FOOTERS
788#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
789#else /* FOOTERS */
790#define CHUNK_OVERHEAD (SIZE_T_SIZE)
791#endif /* FOOTERS */
792
793/* MMapped chunks need a second word of overhead ... */
794#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
795/* ... and additional padding for fake next-chunk at foot */
796#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
797
798/* The smallest size we can malloc is an aligned minimal chunk */
799#define MIN_CHUNK_SIZE\
800 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
801
802/* conversion from malloc headers to user pointers, and back */
803#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
804#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
805/* chunk associated with aligned address A */
806#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
807
808/* Bounds on request (not chunk) sizes. */
809#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
810#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
811
812/* pad request bytes into a usable size */
813#define pad_request(req) \
814 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
815
816/* pad request, checking for minimum (but not maximum) */
817#define request2size(req) \
818 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
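/*
  Worked example, assuming a 64-bit size_t, 16-byte MALLOC_ALIGNMENT and
  no FOOTERS (so CHUNK_OVERHEAD is 8): MCHUNK_SIZE and MIN_CHUNK_SIZE are
  both 32 and MIN_REQUEST is 23, giving

    request2size(1)   == 32   (below MIN_REQUEST, rounded up to MIN_CHUNK_SIZE)
    request2size(24)  == 32   (24 + 8 is already 16-byte aligned)
    request2size(25)  == 48   (25 + 8 padded up to the next multiple of 16)
*/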
819
820
821/* ------------------ Operations on head and foot fields ----------------- */
822
823/*
824 The head field of a chunk is or'ed with PINUSE_BIT when previous
825 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
826 use, unless mmapped, in which case both bits are cleared.
827
828 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
829*/
830
831#define PINUSE_BIT (SIZE_T_ONE)
832#define CINUSE_BIT (SIZE_T_TWO)
833#define FLAG4_BIT (SIZE_T_FOUR)
834#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
835#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
836
837/* Head value for fenceposts */
838#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
839
840/* extraction of fields from head words */
841#define cinuse(p) ((p)->head & CINUSE_BIT)
842#define pinuse(p) ((p)->head & PINUSE_BIT)
843#define flag4inuse(p) ((p)->head & FLAG4_BIT)
844#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
845#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
846
847#define chunksize(p) ((p)->head & ~(FLAG_BITS))
848
849#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
850#define set_flag4(p) ((p)->head |= FLAG4_BIT)
851#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
852
853/* Treat space at ptr +/- offset as a chunk */
854#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
855#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
856
857/* Ptr to next or previous physical malloc_chunk. */
858#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
859#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
860
861/* extract next chunk's pinuse bit */
862#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
863
864/* Get/set size at footer */
865#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
866#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
867
868/* Set size, pinuse bit, and foot */
869#define set_size_and_pinuse_of_free_chunk(p, s)\
870 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
871
872/* Set size, pinuse bit, foot, and clear next pinuse */
873#define set_free_with_pinuse(p, s, n)\
874 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
875
876/* Get the internal overhead associated with chunk p */
877#define overhead_for(p)\
878 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
879
880/* Return true if malloced space is not necessarily cleared */
881#if MMAP_CLEARS
882#define calloc_must_clear(p) (!is_mmapped(p))
883#else /* MMAP_CLEARS */
884#define calloc_must_clear(p) (1)
885#endif /* MMAP_CLEARS */
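/*
  Putting the pointer conversions and head-field accessors together:
  given a payload pointer produced by this allocator, the enclosing chunk
  and its usable capacity can be recovered as in the following sketch
  (illustrative only; the internal routines do the equivalent inline):

    static size_t payload_capacity(void* mem) {
      mchunkptr p = mem2chunk(mem);            // back up to the chunk header
      return chunksize(p) - overhead_for(p);   // usable bytes in this chunk
    }
*/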
886
887/* ---------------------- Overlaid data structures ----------------------- */
888
889/*
890 When chunks are not in use, they are treated as nodes of either
891 lists or trees.
892
893 "Small" chunks are stored in circular doubly-linked lists, and look
894 like this:
895
896 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
897 | Size of previous chunk |
898 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
899 `head:' | Size of chunk, in bytes |P|
900 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
901 | Forward pointer to next chunk in list |
902 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
903 | Back pointer to previous chunk in list |
904 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
905 | Unused space (may be 0 bytes long) .
906 . .
907 . |
908nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
909 `foot:' | Size of chunk, in bytes |
910 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
911
912 Larger chunks are kept in a form of bitwise digital trees (aka
913 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
914 free chunks greater than 256 bytes, their size doesn't impose any
915 constraints on user chunk sizes. Each node looks like:
916
917 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
918 | Size of previous chunk |
919 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
920 `head:' | Size of chunk, in bytes |P|
921 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
922 | Forward pointer to next chunk of same size |
923 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
924 | Back pointer to previous chunk of same size |
925 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
926 | Pointer to left child (child[0]) |
927 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
928 | Pointer to right child (child[1]) |
929 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
930 | Pointer to parent |
931 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
932 | bin index of this chunk |
933 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
934 | Unused space .
935 . |
936nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
937 `foot:' | Size of chunk, in bytes |
938 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
939
940 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
941 of the same size are arranged in a circularly-linked list, with only
942 the oldest chunk (the next to be used, in our FIFO ordering)
943 actually in the tree. (Tree members are distinguished by a non-null
944 parent pointer.) If a chunk with the same size as an existing node
945 is inserted, it is linked off the existing node using pointers that
946 work in the same way as fd/bk pointers of small chunks.
947
948 Each tree contains a power of 2 sized range of chunk sizes (the
949 smallest is 0x100 <= x < 0x180), which is divided in half at each
950 tree level, with the chunks in the smaller half of the range (0x100
951 <= x < 0x140 for the top node) in the left subtree and the larger
952 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
953 done by inspecting individual bits.
954
955 Using these rules, each node's left subtree contains all smaller
956 sizes than its right subtree. However, the node at the root of each
957 subtree has no particular ordering relationship to either. (The
958 dividing line between the subtree sizes is based on trie relation.)
959 If we remove the last chunk of a given size from the interior of the
960 tree, we need to replace it with a leaf node. The tree ordering
961 rules permit a node to be replaced by any leaf below it.
962
963 The smallest chunk in a tree (a common operation in a best-fit
964 allocator) can be found by walking a path to the leftmost leaf in
965 the tree. Unlike a usual binary tree, where we follow left child
966 pointers until we reach a null, here we follow the right child
967 pointer any time the left one is null, until we reach a leaf with
968 both child pointers null. The smallest chunk in the tree will be
969 somewhere along that path.
970
971 The worst case number of steps to add, find, or remove a node is
972 bounded by the number of bits differentiating chunks within
973 bins. Under current bin calculations, this ranges from 6 up to 21
974 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
975 is of course much better.
976*/
977
978struct malloc_tree_chunk {
979 /* The first four fields must be compatible with malloc_chunk */
980 size_t prev_foot;
981 size_t head;
982 struct malloc_tree_chunk* fd;
983 struct malloc_tree_chunk* bk;
984
985 struct malloc_tree_chunk* child[2];
986 struct malloc_tree_chunk* parent;
987 bindex_t index;
988};
989
990typedef struct malloc_tree_chunk tchunk;
991typedef struct malloc_tree_chunk* tchunkptr;
992typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
993
994/* A little helper macro for trees */
995#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
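/*
  The "leftmost path" rule described above can be written out directly.
  A sketch of locating the smallest chunk held in one tree (the actual
  best-fit search in this file fuses this walk with other bookkeeping):

    static tchunkptr smallest_in_tree(tchunkptr t) {
      tchunkptr best = t;
      while ((t = leftmost_child(t)) != 0) {   // left child, else right child
        if (chunksize(t) < chunksize(best))    // smallest lies along this path
          best = t;
      }
      return best;
    }
*/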
996
997/* ----------------------------- Segments -------------------------------- */
998
999/*
1000 Each malloc space may include non-contiguous segments, held in a
1001 list headed by an embedded malloc_segment record representing the
1002 top-most space. Segments also include flags holding properties of
1003 the space. Large chunks that are directly allocated by mmap are not
1004 included in this list. They are instead independently created and
1005 destroyed without otherwise keeping track of them.
1006
1007 Segment management mainly comes into play for spaces allocated by
1008 MMAP. Any call to MMAP might or might not return memory that is
1009 adjacent to an existing segment. MORECORE normally contiguously
1010 extends the current space, so this space is almost always adjacent,
1011 which is simpler and faster to deal with. (This is why MORECORE is
1012 used preferentially to MMAP when both are available -- see
1013 sys_alloc.) When allocating using MMAP, we don't use any of the
1014 hinting mechanisms (inconsistently) supported in various
1015 implementations of unix mmap, or distinguish reserving from
1016 committing memory. Instead, we just ask for space, and exploit
1017 contiguity when we get it. It is probably possible to do
1018 better than this on some systems, but no general scheme seems
1019 to be significantly better.
1020
1021 Management entails a simpler variant of the consolidation scheme
1022 used for chunks to reduce fragmentation -- new adjacent memory is
1023 normally prepended or appended to an existing segment. However,
1024 there are limitations compared to chunk consolidation that mostly
1025 reflect the fact that segment processing is relatively infrequent
1026 (occurring only when getting memory from system) and that we
1027 don't expect to have huge numbers of segments:
1028
1029 * Segments are not indexed, so traversal requires linear scans. (It
1030 would be possible to index these, but is not worth the extra
1031 overhead and complexity for most programs on most platforms.)
1032 * New segments are only appended to old ones when holding top-most
1033 memory; if they cannot be prepended to others, they are held in
1034 different segments.
1035
1036 Except for the top-most segment of an mstate, each segment record
1037 is kept at the tail of its segment. Segments are added by pushing
1038 segment records onto the list headed by &mstate.seg for the
1039 containing mstate.
1040
1041 Segment flags control allocation/merge/deallocation policies:
1042 * If EXTERN_BIT set, then we did not allocate this segment,
1043 and so should not try to deallocate or merge with others.
1044 (This currently holds only for the initial segment passed
1045 into create_mspace_with_base.)
1046 * If USE_MMAP_BIT set, the segment may be merged with
1047 other surrounding mmapped segments and trimmed/de-allocated
1048 using munmap.
1049 * If neither bit is set, then the segment was obtained using
1050 MORECORE so can be merged with surrounding MORECORE'd segments
1051 and deallocated/trimmed using MORECORE with negative arguments.
1052*/
1053
1054struct malloc_segment {
1055 char* base; /* base address */
1056 size_t size; /* allocated size */
1057 struct malloc_segment* next; /* ptr to next segment */
1058 flag_t sflags; /* mmap and extern flag */
1059};
1060
1061#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1062#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1063
1064typedef struct malloc_segment msegment;
1065typedef struct malloc_segment* msegmentptr;
1066
1067/* ---------------------------- malloc_state ----------------------------- */
1068
1069/*
1070 A malloc_state holds all of the bookkeeping for a space.
1071 The main fields are:
1072
1073 Top
1074 The topmost chunk of the currently active segment. Its size is
1075 cached in topsize. The actual size of topmost space is
1076 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1077 fenceposts and segment records if necessary when getting more
1078 space from the system. The size at which to autotrim top is
1079 cached from mparams in trim_check, except that it is disabled if
1080 an autotrim fails.
1081
1082 Designated victim (dv)
1083 This is the preferred chunk for servicing small requests that
1084 don't have exact fits. It is normally the chunk split off most
1085 recently to service another small request. Its size is cached in
1086 dvsize. The link fields of this chunk are not maintained since it
1087 is not kept in a bin.
1088
1089 SmallBins
1090 An array of bin headers for free chunks. These bins hold chunks
1091 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1092 chunks of all the same size, spaced 8 bytes apart. To simplify
1093 use in double-linked lists, each bin header acts as a malloc_chunk
1094 pointing to the real first node, if it exists (else pointing to
1095 itself). This avoids special-casing for headers. But to avoid
1096 waste, we allocate only the fd/bk pointers of bins, and then use
1097 repositioning tricks to treat these as the fields of a chunk.
1098
1099 TreeBins
1100 Treebins are pointers to the roots of trees holding a range of
1101 sizes. There are 2 equally spaced treebins for each power of two
1102 from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
1103 larger.
1104
1105 Bin maps
1106 There is one bit map for small bins ("smallmap") and one for
1107 treebins ("treemap). Each bin sets its bit when non-empty, and
1108 clears the bit when empty. Bit operations are then used to avoid
1109 bin-by-bin searching -- nearly all "search" is done without ever
1110 looking at bins that won't be selected. The bit maps
1111 conservatively use 32 bits per map word, even on a 64-bit system.
1112 For a good description of some of the bit-based techniques used
1113 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1114 supplement at http://hackersdelight.org/). Many of these are
1115 intended to reduce the branchiness of paths through malloc etc, as
1116 well as to reduce the number of memory locations read or written.
1117
1118 Segments
1119 A list of segments headed by an embedded malloc_segment record
1120 representing the initial space.
1121
1122 Address check support
1123 The least_addr field is the least address ever obtained from
1124 MORECORE or MMAP. Attempted frees and reallocs of any address less
1125 than this are trapped (unless INSECURE is defined).
1126
1127 Magic tag
1128 A cross-check field that should always hold the same value as mparams.magic.
1129
1130 Max allowed footprint
1131 The maximum allowed bytes to allocate from system (zero means no limit)
1132
1133 Flags
1134 Bits recording whether to use MMAP, locks, or contiguous MORECORE
1135
1136 Statistics
1137 Each space keeps track of current and maximum system memory
1138 obtained via MORECORE or MMAP.
1139
1140 Trim support
1141 Fields holding the amount of unused topmost memory that should trigger
1142 trimming, and a counter to force periodic scanning to release unused
1143 non-topmost segments.
1144
1145 Locking
1146 If USE_LOCKS is defined, the "mutex" lock is acquired and released
1147 around every public call using this mspace.
1148
1149 Extension support
1150 A void* pointer and a size_t field that can be used to help implement
1151 extensions to this malloc.
1152*/
1153
1154/* Bin types, widths and sizes */
1155#define NSMALLBINS (32U)
1156#define NTREEBINS (32U)
1157#define SMALLBIN_SHIFT (3U)
1158#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1159#define TREEBIN_SHIFT (8U)
1160#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1161#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1162#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
1163
1164struct malloc_state {
1165 binmap_t smallmap;
1166 binmap_t treemap;
1167 size_t dvsize;
1168 size_t topsize;
1169 char* least_addr;
1170 mchunkptr dv;
1171 mchunkptr top;
1172 size_t trim_check;
1173 size_t release_checks;
1174 size_t magic;
1175 mchunkptr smallbins[(NSMALLBINS+1)*2];
1176 tbinptr treebins[NTREEBINS];
1177 size_t footprint;
1178 size_t max_footprint;
1179 size_t footprint_limit; /* zero means no limit */
1180 flag_t mflags;
1181#if USE_LOCKS
1182 MLOCK_T mutex; /* locate lock among fields that rarely change */
1183#endif /* USE_LOCKS */
1184 msegment seg;
1185 void* extp; /* Unused but available for extensions */
1186 size_t exts;
1187};
1188
1189typedef struct malloc_state* mstate;
1190
1191/* ------------- Global malloc_state and malloc_params ------------------- */
1192
1193/*
1194 malloc_params holds global properties, including those that can be
1195 dynamically set using mallopt. There is a single instance, mparams,
1196 initialized in init_mparams. Note that the non-zeroness of "magic"
1197 also serves as an initialization flag.
1198*/
1199
1200struct malloc_params {
1201 size_t magic;
1202 size_t page_size;
1203 size_t granularity;
1204 size_t mmap_threshold;
1205 size_t trim_threshold;
1206 flag_t default_mflags;
1207};
1208
1209static struct malloc_params mparams;
1210
1211/* Ensure mparams initialized */
1212#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1213
1214#if !ONLY_MSPACES
1215
1216/* The global malloc_state used for all non-"mspace" calls */
1217static struct malloc_state _gm_;
1218#define gm (&_gm_)
1219#define is_global(M) ((M) == &_gm_)
1220
1221#endif /* !ONLY_MSPACES */
1222
1223#define is_initialized(M) ((M)->top != 0)
1224
1225/* -------------------------- system alloc setup ------------------------- */
1226
1227/* Operations on mflags */
1228
1229#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1230#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1231#if USE_LOCKS
1232#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1233#else
1234#define disable_lock(M)
1235#endif
1236
1237#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1238#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1239#if HAVE_MMAP
1240#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1241#else
1242#define disable_mmap(M)
1243#endif
1244
1245#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1246#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1247#define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1248#define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1249#define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1250#define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1251#define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1252
1253#define set_lock(M,L)\
1254 ((M)->mflags = (L)?\
1255 ((M)->mflags | USE_LOCK_BIT) :\
1256 ((M)->mflags & ~USE_LOCK_BIT))
1257
1258/* page-align a size */
1259#define page_align(S)\
1260 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1261
1262/* granularity-align a size */
1263#define granularity_align(S)\
1264 (((S) + (mparams.granularity - SIZE_T_ONE))\
1265 & ~(mparams.granularity - SIZE_T_ONE))
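/* For example, with a 4096-byte page size, page_align(1) and
   page_align(4096) both yield 4096, while page_align(4097) yields 8192. */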
1266
1267
1268/* For mmap, use granularity alignment on windows, else page-align */
1269#ifdef WIN32
1270#define mmap_align(S) granularity_align(S)
1271#else
1272#define mmap_align(S) page_align(S)
1273#endif
1274
1275/* For sys_alloc, enough padding to ensure can malloc request on success */
1276#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1277
1278#define is_page_aligned(S)\
1279 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1280#define is_granularity_aligned(S)\
1281 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1282
1283/* True if segment S holds address A */
1284#define segment_holds(S, A)\
1285 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1286
1287/* Return segment holding given address */
1288__clib_nosanitize_addr
1289static msegmentptr segment_holding(mstate m, char* addr) {
1290 msegmentptr sp = &m->seg;
1291 for (;;) {
1292 if (addr >= sp->base && addr < sp->base + sp->size)
1293 return sp;
1294 if ((sp = sp->next) == 0)
1295 return 0;
1296 }
1297}
1298
1299/* Return true if segment contains a segment link */
1300__clib_nosanitize_addr
1301static int has_segment_link(mstate m, msegmentptr ss) {
1302 msegmentptr sp = &m->seg;
1303 for (;;) {
1304 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1305 return 1;
1306 if ((sp = sp->next) == 0)
1307 return 0;
1308 }
1309}
1310
1311#ifndef MORECORE_CANNOT_TRIM
1312#define should_trim(M,s) ((s) > (M)->trim_check)
1313#else /* MORECORE_CANNOT_TRIM */
1314#define should_trim(M,s) (0)
1315#endif /* MORECORE_CANNOT_TRIM */
1316
1317/*
1318 TOP_FOOT_SIZE is padding at the end of a segment, including space
1319 that may be needed to place segment records and fenceposts when new
1320 noncontiguous segments are added.
1321*/
1322#define TOP_FOOT_SIZE\
1323 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1324
1325
1326/* ------------------------------- Hooks -------------------------------- */
1327
1328/*
1329 PREACTION should be defined to return 0 on success, and nonzero on
1330 failure. If you are not using locking, you can redefine these to do
1331 anything you like.
1332*/
1333
1334#if USE_LOCKS
1335#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1336#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1337#else /* USE_LOCKS */
1338
1339#ifndef PREACTION
1340#define PREACTION(M) (0)
1341#endif /* PREACTION */
1342
1343#ifndef POSTACTION
1344#define POSTACTION(M)
1345#endif /* POSTACTION */
1346
1347#endif /* USE_LOCKS */
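/*
  The public entry points bracket their work with these hooks using the
  following pattern (sketch; PREACTION returning 0 means the lock, if
  any, was obtained):

    if (!PREACTION(m)) {
      ... operate on the mspace m ...
      POSTACTION(m);
    }
*/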
1348
1349/*
1350 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1351 USAGE_ERROR_ACTION is triggered on detected bad frees and
1352 reallocs. The argument p is an address that might have triggered the
1353 fault. It is ignored by the two predefined actions, but might be
1354 useful in custom actions that try to help diagnose errors.
1355*/
1356
1357#if PROCEED_ON_ERROR
1358
1359/* A count of the number of corruption errors causing resets */
1360int malloc_corruption_error_count;
1361
1362/* default corruption action */
1363static void reset_on_error(mstate m);
1364
1365#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1366#define USAGE_ERROR_ACTION(m, p)
1367
1368#else /* PROCEED_ON_ERROR */
1369
1370#ifndef CORRUPTION_ERROR_ACTION
1371#define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1372#endif /* CORRUPTION_ERROR_ACTION */
1373
1374#ifndef USAGE_ERROR_ACTION
1375#define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1376#endif /* USAGE_ERROR_ACTION */
1377
1378#endif /* PROCEED_ON_ERROR */
1379
1380
1381/* -------------------------- Debugging setup ---------------------------- */
1382
1383#if ! DEBUG
1384
1385#define check_free_chunk(M,P)
1386#define check_inuse_chunk(M,P)
1387#define check_malloced_chunk(M,P,N)
1388#define check_mmapped_chunk(M,P)
1389#define check_malloc_state(M)
1390#define check_top_chunk(M,P)
1391
1392#else /* DEBUG */
1393#define check_free_chunk(M,P) do_check_free_chunk(M,P)
1394#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1395#define check_top_chunk(M,P) do_check_top_chunk(M,P)
1396#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1397#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1398#define check_malloc_state(M) do_check_malloc_state(M)
1399
1400static void do_check_any_chunk(mstate m, mchunkptr p);
1401static void do_check_top_chunk(mstate m, mchunkptr p);
1402static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1403static void do_check_inuse_chunk(mstate m, mchunkptr p);
1404static void do_check_free_chunk(mstate m, mchunkptr p);
1405static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1406static void do_check_tree(mstate m, tchunkptr t);
1407static void do_check_treebin(mstate m, bindex_t i);
1408static void do_check_smallbin(mstate m, bindex_t i);
1409static void do_check_malloc_state(mstate m);
1410static int bin_find(mstate m, mchunkptr x);
1411static size_t traverse_and_check(mstate m);
1412#endif /* DEBUG */
1413
1414/* ---------------------------- Indexing Bins ---------------------------- */
1415
1416#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1417#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1418#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1419#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1420
1421/* addressing by index. See above about smallbin repositioning */
1422#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1423#define treebin_at(M,i) (&((M)->treebins[i]))
1424
1425/* assign tree index for size S to variable I. Use x86 asm if possible */
1426#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1427#define compute_tree_index(S, I)\
1428{\
1429 unsigned int X = S >> TREEBIN_SHIFT;\
1430 if (X == 0)\
1431 I = 0;\
1432 else if (X > 0xFFFF)\
1433 I = NTREEBINS-1;\
1434 else {\
1435 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1436 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1437 }\
1438}
1439
1440#elif defined (__INTEL_COMPILER)
1441#define compute_tree_index(S, I)\
1442{\
1443 size_t X = S >> TREEBIN_SHIFT;\
1444 if (X == 0)\
1445 I = 0;\
1446 else if (X > 0xFFFF)\
1447 I = NTREEBINS-1;\
1448 else {\
1449 unsigned int K = _bit_scan_reverse (X); \
1450 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1451 }\
1452}
1453
1454#elif defined(_MSC_VER) && _MSC_VER>=1300
1455#define compute_tree_index(S, I)\
1456{\
1457 size_t X = S >> TREEBIN_SHIFT;\
1458 if (X == 0)\
1459 I = 0;\
1460 else if (X > 0xFFFF)\
1461 I = NTREEBINS-1;\
1462 else {\
1463 unsigned int K;\
1464 _BitScanReverse((DWORD *) &K, (DWORD) X);\
1465 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1466 }\
1467}
1468
1469#else /* GNUC */
1470#define compute_tree_index(S, I)\
1471{\
1472 size_t X = S >> TREEBIN_SHIFT;\
1473 if (X == 0)\
1474 I = 0;\
1475 else if (X > 0xFFFF)\
1476 I = NTREEBINS-1;\
1477 else {\
1478 unsigned int Y = (unsigned int)X;\
1479 unsigned int N = ((Y - 0x100) >> 16) & 8;\
1480 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1481 N += K;\
1482 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1483 K = 14 - N + ((Y <<= K) >> 15);\
1484 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1485 }\
1486}
1487#endif /* GNUC */
1488
1489/* Bit representing maximum resolved size in a treebin at i */
1490#define bit_for_tree_index(i) \
1491 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1492
1493/* Shift placing maximum resolved bit in a treebin at i as sign bit */
1494#define leftshift_for_tree_index(i) \
1495 ((i == NTREEBINS-1)? 0 : \
1496 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1497
1498/* The size of the smallest chunk held in bin with index i */
1499#define minsize_for_tree_index(i) \
1500 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1501 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
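/*
  Worked example: for a chunk size of 0x300 (768 bytes), X is 3 and its
  highest set bit is bit 1, so compute_tree_index yields I = (1 << 1) + 1
  = 3; minsize_for_tree_index(3) is (1 << 9) | (1 << 8) = 768, so treebin
  3 holds sizes 768 through 1023.
*/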
1502
1503
1504/* ------------------------ Operations on bin maps ----------------------- */
1505
1506/* bit corresponding to given index */
1507#define idx2bit(i) ((binmap_t)(1) << (i))
1508
1509/* Mark/Clear bits with given index */
1510#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1511#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1512#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1513
1514#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1515#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1516#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1517
1518/* isolate the least set bit of a bitmap */
1519#define least_bit(x) ((x) & -(x))
1520
1521/* mask with all bits to left of least bit of x on */
1522#define left_bits(x) ((x<<1) | -(x<<1))
1523
1524/* mask with all bits to left of or equal to least bit of x on */
1525#define same_or_left_bits(x) ((x) | -(x))
1526
1527/* index corresponding to given bit. Use x86 asm if possible */
1528
1529#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1530#define compute_bit2idx(X, I)\
1531{\
1532 unsigned int J;\
1533 J = __builtin_ctz(X); \
1534 I = (bindex_t)J;\
1535}
1536
1537#elif defined (__INTEL_COMPILER)
1538#define compute_bit2idx(X, I)\
1539{\
1540 unsigned int J;\
1541 J = _bit_scan_forward (X); \
1542 I = (bindex_t)J;\
1543}
1544
1545#elif defined(_MSC_VER) && _MSC_VER>=1300
1546#define compute_bit2idx(X, I)\
1547{\
1548 unsigned int J;\
1549 _BitScanForward((DWORD *) &J, X);\
1550 I = (bindex_t)J;\
1551}
1552
1553#elif USE_BUILTIN_FFS
1554#define compute_bit2idx(X, I) I = ffs(X)-1
1555
1556#else
1557#define compute_bit2idx(X, I)\
1558{\
1559 unsigned int Y = X - 1;\
1560 unsigned int K = Y >> (16-4) & 16;\
1561 unsigned int N = K; Y >>= K;\
1562 N += K = Y >> (8-3) & 8; Y >>= K;\
1563 N += K = Y >> (4-2) & 4; Y >>= K;\
1564 N += K = Y >> (2-1) & 2; Y >>= K;\
1565 N += K = Y >> (1-0) & 1; Y >>= K;\
1566 I = (bindex_t)(N + Y);\
1567}
1568#endif /* GNUC */
1569
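/*
  Illustration of the binmap helpers above, on a made-up map with bins
  3, 5 and 10 occupied.  The compiled-out example_next_bin() below is a
  hypothetical helper that mirrors how dlmalloc()/tmalloc_large() locate
  the next nonempty bin strictly above a given index: mask with
  left_bits(), isolate the lowest remaining bit with least_bit(), then
  convert it back to an index with compute_bit2idx().
*/
#if 0
static bindex_t example_next_bin (void) {
  binmap_t map = idx2bit (3) | idx2bit (5) | idx2bit (10);
  binmap_t above5 = left_bits (idx2bit (5)) & map;   /* == idx2bit(10)     */
  binmap_t lsb = least_bit (above5);                 /* isolate lowest bit */
  bindex_t i;
  compute_bit2idx (lsb, i);                          /* i == 10            */
  return i;
}
#endif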
1570
1571/* ----------------------- Runtime Check Support ------------------------- */
1572
1573/*
1574 For security, the main invariant is that malloc/free/etc never
1575 writes to a static address other than malloc_state, unless static
1576 malloc_state itself has been corrupted, which cannot occur via
1577 malloc (because of these checks). In essence this means that we
1578 believe all pointers, sizes, maps etc held in malloc_state, but
1579 check all of those linked or offsetted from other embedded data
1580 structures. These checks are interspersed with main code in a way
1581 that tends to minimize their run-time cost.
1582
1583 When FOOTERS is defined, in addition to range checking, we also
1584  verify footer fields of inuse chunks, which can be used to guarantee
1585 that the mstate controlling malloc/free is intact. This is a
1586 streamlined version of the approach described by William Robertson
1587 et al in "Run-time Detection of Heap-based Overflows" LISA'03
1588 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1589 of an inuse chunk holds the xor of its mstate and a random seed,
1590 that is checked upon calls to free() and realloc(). This is
1591  (probabilistically) unguessable from outside the program, but can be
1592  computed by any code successfully malloc'ing any chunk, so does not
1593 itself provide protection against code that has already broken
1594 security through some other means. Unlike Robertson et al, we
1595 always dynamically check addresses of all offset chunks (previous,
1596 next, etc). This turns out to be cheaper than relying on hashes.
1597*/
1598
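/*
  Sketch of the footer check described above (illustrative, compiled out).
  With FOOTERS enabled, mark_inuse_foot() stores (size_t)mstate ^
  mparams.magic in the prev_foot field of the chunk that trails an
  allocation, and get_mstate_for() recovers the mstate by xor'ing with the
  same magic before ok_magic() is applied.  The helper below is
  hypothetical and only restates that round trip.
*/
#if 0
static int example_footer_roundtrip (mstate m) {
  size_t stored = (size_t) m ^ mparams.magic;            /* what mark_inuse_foot() writes */
  mstate recovered = (mstate) (stored ^ mparams.magic);  /* what get_mstate_for() reads   */
  return recovered == m;   /* 1 unless the footer was overwritten in the meantime */
}
#endif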
1599#if !INSECURE
1600/* Check if address a is at least as high as any from MORECORE or MMAP */
1601#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1602/* Check if address of next chunk n is higher than base chunk p */
1603#define ok_next(p, n) ((char*)(p) < (char*)(n))
1604/* Check if p has inuse status */
1605#define ok_inuse(p) is_inuse(p)
1606/* Check if p has its pinuse bit on */
1607#define ok_pinuse(p) pinuse(p)
1608
1609#else /* !INSECURE */
1610#define ok_address(M, a) (1)
1611#define ok_next(b, n) (1)
1612#define ok_inuse(p) (1)
1613#define ok_pinuse(p) (1)
1614#endif /* !INSECURE */
1615
1616#if (FOOTERS && !INSECURE)
1617/* Check if (alleged) mstate m has expected magic field */
1618__clib_nosanitize_addr
1619static inline int
1620ok_magic (const mstate m)
1621{
1622 return (m->magic == mparams.magic);
1623}
1624#else /* (FOOTERS && !INSECURE) */
1625#define ok_magic(M) (1)
1626#endif /* (FOOTERS && !INSECURE) */
1627
1628/* In gcc, use __builtin_expect to minimize impact of checks */
1629#if !INSECURE
1630#if defined(__GNUC__) && __GNUC__ >= 3
1631#define RTCHECK(e) __builtin_expect(e, 1)
1632#else /* GNUC */
1633#define RTCHECK(e) (e)
1634#endif /* GNUC */
1635#else /* !INSECURE */
1636#define RTCHECK(e) (1)
1637#endif /* !INSECURE */
1638
1639/* macros to set up inuse chunks with or without footers */
1640
1641#if !FOOTERS
1642
1643#define mark_inuse_foot(M,p,s)
1644
1645/* Macros for setting head/foot of non-mmapped chunks */
1646
1647/* Set cinuse bit and pinuse bit of next chunk */
1648#define set_inuse(M,p,s)\
1649 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1650 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1651
1652/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1653#define set_inuse_and_pinuse(M,p,s)\
1654 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1655 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1656
1657/* Set size, cinuse and pinuse bit of this chunk */
1658#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1659 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1660
1661#else /* FOOTERS */
1662
1663/* Set foot of inuse chunk to be xor of mstate and seed */
1664#define mark_inuse_foot(M,p,s)\
1665 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1666
1667#define get_mstate_for(p)\
1668 ((mstate)(((mchunkptr)((char*)(p) +\
1669 (chunksize(p))))->prev_foot ^ mparams.magic))
1670
1671#define set_inuse(M,p,s)\
1672 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1673 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1674 mark_inuse_foot(M,p,s))
1675
1676#define set_inuse_and_pinuse(M,p,s)\
1677 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1678 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1679 mark_inuse_foot(M,p,s))
1680
1681#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1682 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1683 mark_inuse_foot(M, p, s))
1684
1685#endif /* !FOOTERS */
1686
1687/* ---------------------------- setting mparams -------------------------- */
1688
1689#if LOCK_AT_FORK
1690static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1691static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1692static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1693#endif /* LOCK_AT_FORK */
1694
1695/* Initialize mparams */
1696static int init_mparams(void) {
1697#ifdef NEED_GLOBAL_LOCK_INIT
1698 if (malloc_global_mutex_status <= 0)
1699 init_malloc_global_mutex();
1700#endif
1701
1702 ACQUIRE_MALLOC_GLOBAL_LOCK();
1703 if (mparams.magic == 0) {
1704 size_t magic;
1705 size_t psize;
1706 size_t gsize;
1707
1708#ifndef WIN32
1709 psize = malloc_getpagesize;
1710 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1711#else /* WIN32 */
1712 {
1713 SYSTEM_INFO system_info;
1714 GetSystemInfo(&system_info);
1715 psize = system_info.dwPageSize;
1716 gsize = ((DEFAULT_GRANULARITY != 0)?
1717 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1718 }
1719#endif /* WIN32 */
1720
1721 /* Sanity-check configuration:
1722 size_t must be unsigned and as wide as pointer type.
1723 ints must be at least 4 bytes.
1724 alignment must be at least 8.
1725 Alignment, min chunk size, and page size must all be powers of 2.
1726 */
1727 if ((sizeof(size_t) != sizeof(char*)) ||
1728 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1729 (sizeof(int) < 4) ||
1730 (MALLOC_ALIGNMENT < (size_t)8U) ||
1731 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
1732 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1733 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1734 ((psize & (psize-SIZE_T_ONE)) != 0))
1735 DLM_ABORT;
1736 mparams.granularity = gsize;
1737 mparams.page_size = psize;
1738 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1739 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1740#if MORECORE_CONTIGUOUS
1741 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1742#else /* MORECORE_CONTIGUOUS */
1743 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1744#endif /* MORECORE_CONTIGUOUS */
1745
1746#if !ONLY_MSPACES
1747 /* Set up lock for main malloc area */
1748 gm->mflags = mparams.default_mflags;
1749 (void)INITIAL_LOCK(&gm->mutex);
1750#endif
1751#if LOCK_AT_FORK
1752 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1753#endif
1754
1755 {
1756#ifndef DLM_MAGIC_CONSTANT
1757#if USE_DEV_RANDOM
1758 int fd;
1759 unsigned char buf[sizeof(size_t)];
1760 /* Try to use /dev/urandom, else fall back on using time */
1761 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1762 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1763 magic = *((size_t *) buf);
1764 close(fd);
1765 }
1766 else
1767#endif /* USE_DEV_RANDOM */
1768#ifdef WIN32
1769 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1770#elif defined(LACKS_TIME_H)
1771 magic = (size_t)&magic ^ (size_t)0x55555555U;
1772#else
1773 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1774#endif
1775 magic |= (size_t)8U; /* ensure nonzero */
1776 magic &= ~(size_t)7U; /* improve chances of fault for bad values */
1777#else
1778      magic = DLM_MAGIC_CONSTANT;
1779#endif
1780      /* Until memory-model primitives are commonly available, use a volatile write */
1781 (*(volatile size_t *)(&(mparams.magic))) = magic;
1782 }
1783 }
1784
1785 RELEASE_MALLOC_GLOBAL_LOCK();
1786 return 1;
1787}
1788
1789/* support for mallopt */
1790static int change_mparam(int param_number, int value) {
1791 size_t val;
1792 ensure_initialization();
1793 val = (value == -1)? MAX_SIZE_T : (size_t)value;
1794 switch(param_number) {
1795 case M_TRIM_THRESHOLD:
1796 mparams.trim_threshold = val;
1797 return 1;
1798 case M_GRANULARITY:
1799 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1800 mparams.granularity = val;
1801 return 1;
1802 }
1803 else
1804 return 0;
1805 case M_MMAP_THRESHOLD:
1806 mparams.mmap_threshold = val;
1807 return 1;
1808 default:
1809 return 0;
1810 }
1811}
1812
1813#if DEBUG
1814/* ------------------------- Debugging Support --------------------------- */
1815
1816/* Check properties of any chunk, whether free, inuse, mmapped etc */
1817static void do_check_any_chunk(mstate m, mchunkptr p) {
1818 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1819 assert(ok_address(m, p));
1820}
1821
1822/* Check properties of top chunk */
1823static void do_check_top_chunk(mstate m, mchunkptr p) {
1824 msegmentptr sp = segment_holding(m, (char*)p);
1825 size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1826 assert(sp != 0);
1827 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1828 assert(ok_address(m, p));
1829 assert(sz == m->topsize);
1830 assert(sz > 0);
1831 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1832 assert(pinuse(p));
1833 assert(!pinuse(chunk_plus_offset(p, sz)));
1834}
1835
1836/* Check properties of (inuse) mmapped chunks */
1837static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1838 size_t sz = chunksize(p);
1839 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1840 assert(is_mmapped(p));
1841 assert(use_mmap(m));
1842 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1843 assert(ok_address(m, p));
1844 assert(!is_small(sz));
1845 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1846 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1847 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1848}
1849
1850/* Check properties of inuse chunks */
1851static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1852 do_check_any_chunk(m, p);
1853 assert(is_inuse(p));
1854 assert(next_pinuse(p));
1855 /* If not pinuse and not mmapped, previous chunk has OK offset */
1856 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1857 if (is_mmapped(p))
1858 do_check_mmapped_chunk(m, p);
1859}
1860
1861/* Check properties of free chunks */
1862static void do_check_free_chunk(mstate m, mchunkptr p) {
1863 size_t sz = chunksize(p);
1864 mchunkptr next = chunk_plus_offset(p, sz);
1865 do_check_any_chunk(m, p);
1866 assert(!is_inuse(p));
1867 assert(!next_pinuse(p));
1868 assert (!is_mmapped(p));
1869 if (p != m->dv && p != m->top) {
1870 if (sz >= MIN_CHUNK_SIZE) {
1871 assert((sz & CHUNK_ALIGN_MASK) == 0);
1872 assert(is_aligned(chunk2mem(p)));
1873 assert(next->prev_foot == sz);
1874 assert(pinuse(p));
1875 assert (next == m->top || is_inuse(next));
1876 assert(p->fd->bk == p);
1877 assert(p->bk->fd == p);
1878 }
1879 else /* markers are always of size SIZE_T_SIZE */
1880 assert(sz == SIZE_T_SIZE);
1881 }
1882}
1883
1884/* Check properties of malloced chunks at the point they are malloced */
1885static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1886 if (mem != 0) {
1887 mchunkptr p = mem2chunk(mem);
1888 size_t sz = p->head & ~INUSE_BITS;
1889 do_check_inuse_chunk(m, p);
1890 assert((sz & CHUNK_ALIGN_MASK) == 0);
1891 assert(sz >= MIN_CHUNK_SIZE);
1892 assert(sz >= s);
1893 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1894 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1895 }
1896}
1897
1898/* Check a tree and its subtrees. */
1899static void do_check_tree(mstate m, tchunkptr t) {
1900 tchunkptr head = 0;
1901 tchunkptr u = t;
1902 bindex_t tindex = t->index;
1903 size_t tsize = chunksize(t);
1904 bindex_t idx;
1905 compute_tree_index(tsize, idx);
1906 assert(tindex == idx);
1907 assert(tsize >= MIN_LARGE_SIZE);
1908 assert(tsize >= minsize_for_tree_index(idx));
1909 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1910
1911 do { /* traverse through chain of same-sized nodes */
1912 do_check_any_chunk(m, ((mchunkptr)u));
1913 assert(u->index == tindex);
1914 assert(chunksize(u) == tsize);
1915 assert(!is_inuse(u));
1916 assert(!next_pinuse(u));
1917 assert(u->fd->bk == u);
1918 assert(u->bk->fd == u);
1919 if (u->parent == 0) {
1920 assert(u->child[0] == 0);
1921 assert(u->child[1] == 0);
1922 }
1923 else {
1924 assert(head == 0); /* only one node on chain has parent */
1925 head = u;
1926 assert(u->parent != u);
1927 assert (u->parent->child[0] == u ||
1928 u->parent->child[1] == u ||
1929 *((tbinptr*)(u->parent)) == u);
1930 if (u->child[0] != 0) {
1931 assert(u->child[0]->parent == u);
1932 assert(u->child[0] != u);
1933 do_check_tree(m, u->child[0]);
1934 }
1935 if (u->child[1] != 0) {
1936 assert(u->child[1]->parent == u);
1937 assert(u->child[1] != u);
1938 do_check_tree(m, u->child[1]);
1939 }
1940 if (u->child[0] != 0 && u->child[1] != 0) {
1941 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1942 }
1943 }
1944 u = u->fd;
1945 } while (u != t);
1946 assert(head != 0);
1947}
1948
1949/* Check all the chunks in a treebin. */
1950static void do_check_treebin(mstate m, bindex_t i) {
1951 tbinptr* tb = treebin_at(m, i);
1952 tchunkptr t = *tb;
1953 int empty = (m->treemap & (1U << i)) == 0;
1954 if (t == 0)
1955 assert(empty);
1956 if (!empty)
1957 do_check_tree(m, t);
1958}
1959
1960/* Check all the chunks in a smallbin. */
1961static void do_check_smallbin(mstate m, bindex_t i) {
1962 sbinptr b = smallbin_at(m, i);
1963 mchunkptr p = b->bk;
1964 unsigned int empty = (m->smallmap & (1U << i)) == 0;
1965 if (p == b)
1966 assert(empty);
1967 if (!empty) {
1968 for (; p != b; p = p->bk) {
1969 size_t size = chunksize(p);
1970 mchunkptr q;
1971 /* each chunk claims to be free */
1972 do_check_free_chunk(m, p);
1973 /* chunk belongs in bin */
1974 assert(small_index(size) == i);
1975 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1976 /* chunk is followed by an inuse chunk */
1977 q = next_chunk(p);
1978 if (q->head != FENCEPOST_HEAD)
1979 do_check_inuse_chunk(m, q);
1980 }
1981 }
1982}
1983
1984/* Find x in a bin. Used in other check functions. */
1985static int bin_find(mstate m, mchunkptr x) {
1986 size_t size = chunksize(x);
1987 if (is_small(size)) {
1988 bindex_t sidx = small_index(size);
1989 sbinptr b = smallbin_at(m, sidx);
1990 if (smallmap_is_marked(m, sidx)) {
1991 mchunkptr p = b;
1992 do {
1993 if (p == x)
1994 return 1;
1995 } while ((p = p->fd) != b);
1996 }
1997 }
1998 else {
1999 bindex_t tidx;
2000 compute_tree_index(size, tidx);
2001 if (treemap_is_marked(m, tidx)) {
2002 tchunkptr t = *treebin_at(m, tidx);
2003 size_t sizebits = size << leftshift_for_tree_index(tidx);
2004 while (t != 0 && chunksize(t) != size) {
2005 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
2006 sizebits <<= 1;
2007 }
2008 if (t != 0) {
2009 tchunkptr u = t;
2010 do {
2011 if (u == (tchunkptr)x)
2012 return 1;
2013 } while ((u = u->fd) != t);
2014 }
2015 }
2016 }
2017 return 0;
2018}
2019
2020/* Traverse each chunk and check it; return total */
2021static size_t traverse_and_check(mstate m) {
2022 size_t sum = 0;
2023 if (is_initialized(m)) {
2024 msegmentptr s = &m->seg;
2025 sum += m->topsize + TOP_FOOT_SIZE;
2026 while (s != 0) {
2027 mchunkptr q = align_as_chunk(s->base);
2028 mchunkptr lastq = 0;
2029 assert(pinuse(q));
2030 while (segment_holds(s, q) &&
2031 q != m->top && q->head != FENCEPOST_HEAD) {
2032 sum += chunksize(q);
2033 if (is_inuse(q)) {
2034 assert(!bin_find(m, q));
2035 do_check_inuse_chunk(m, q);
2036 }
2037 else {
2038 assert(q == m->dv || bin_find(m, q));
2039 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2040 do_check_free_chunk(m, q);
2041 }
2042 lastq = q;
2043 q = next_chunk(q);
2044 }
2045 s = s->next;
2046 }
2047 }
2048 return sum;
2049}
2050
2051
2052/* Check all properties of malloc_state. */
2053static void do_check_malloc_state(mstate m) {
2054 bindex_t i;
2055 size_t total;
2056 /* check bins */
2057 for (i = 0; i < NSMALLBINS; ++i)
2058 do_check_smallbin(m, i);
2059 for (i = 0; i < NTREEBINS; ++i)
2060 do_check_treebin(m, i);
2061
2062 if (m->dvsize != 0) { /* check dv chunk */
2063 do_check_any_chunk(m, m->dv);
2064 assert(m->dvsize == chunksize(m->dv));
2065 assert(m->dvsize >= MIN_CHUNK_SIZE);
2066 assert(bin_find(m, m->dv) == 0);
2067 }
2068
2069 if (m->top != 0) { /* check top chunk */
2070 do_check_top_chunk(m, m->top);
2071 /*assert(m->topsize == chunksize(m->top)); redundant */
2072 assert(m->topsize > 0);
2073 assert(bin_find(m, m->top) == 0);
2074 }
2075
2076 total = traverse_and_check(m);
2077 assert(total <= m->footprint);
2078 assert(m->footprint <= m->max_footprint);
2079}
2080#endif /* DEBUG */
2081
2082/* ----------------------------- statistics ------------------------------ */
2083
2084#if !NO_MALLINFO
2085__clib_nosanitize_addr
2086static struct dlmallinfo internal_mallinfo(mstate m) {
2087 struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2088  ensure_initialization();
2089 if (!PREACTION(m)) {
2090 check_malloc_state(m);
2091 if (is_initialized(m)) {
2092 size_t nfree = SIZE_T_ONE; /* top always free */
2093 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2094 size_t sum = mfree;
2095 msegmentptr s = &m->seg;
2096 while (s != 0) {
2097 mchunkptr q = align_as_chunk(s->base);
2098 while (segment_holds(s, q) &&
2099 q != m->top && q->head != FENCEPOST_HEAD) {
2100 size_t sz = chunksize(q);
2101 sum += sz;
2102 if (!is_inuse(q)) {
2103 mfree += sz;
2104 ++nfree;
2105 }
2106 q = next_chunk(q);
2107 }
2108 s = s->next;
2109 }
2110
2111 nm.arena = sum;
2112 nm.ordblks = nfree;
2113 nm.hblkhd = m->footprint - sum;
2114 nm.usmblks = m->max_footprint;
2115 nm.uordblks = m->footprint - mfree;
2116 nm.fordblks = mfree;
2117 nm.keepcost = m->topsize;
2118 }
2119
2120 POSTACTION(m);
2121 }
2122 return nm;
2123}
2124#endif /* !NO_MALLINFO */
2125
2126#if !NO_MALLOC_STATS
2127static void internal_malloc_stats(mstate m) {
2128 ensure_initialization();
2129 if (!PREACTION(m)) {
2130 size_t maxfp = 0;
2131 size_t fp = 0;
2132 size_t used = 0;
2133 check_malloc_state(m);
2134 if (is_initialized(m)) {
2135 msegmentptr s = &m->seg;
2136 maxfp = m->max_footprint;
2137 fp = m->footprint;
2138 used = fp - (m->topsize + TOP_FOOT_SIZE);
2139
2140 while (s != 0) {
2141 mchunkptr q = align_as_chunk(s->base);
2142 while (segment_holds(s, q) &&
2143 q != m->top && q->head != FENCEPOST_HEAD) {
2144 if (!is_inuse(q))
2145 used -= chunksize(q);
2146 q = next_chunk(q);
2147 }
2148 s = s->next;
2149 }
2150 }
2151 POSTACTION(m); /* drop lock */
2152 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2153 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2154 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2155 }
2156}
2157#endif /* NO_MALLOC_STATS */
2158
2159/* ----------------------- Operations on smallbins ----------------------- */
2160
2161/*
2162 Various forms of linking and unlinking are defined as macros. Even
2163 the ones for trees, which are very long but have very short typical
2164 paths. This is ugly but reduces reliance on inlining support of
2165 compilers.
2166*/
2167
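/*
  Compiled-out miniature of the smallbin list discipline used by the
  macros below: each bin header is a sentinel in a circular doubly linked
  list, insert_small_chunk() pushes at the front, and allocation pops from
  the front (b->fd).  struct ex_node and the ex_* helpers are hypothetical
  stand-ins for malloc_chunk's fd/bk links, shown without the binmap
  updates and RTCHECK address checks the real macros perform.
*/
#if 0
struct ex_node { struct ex_node *fd, *bk; };

static void ex_bin_init (struct ex_node *b) { b->fd = b->bk = b; }

static void ex_push_front (struct ex_node *b, struct ex_node *p) {
  struct ex_node *f = b->fd;    /* old first node (or b itself if empty) */
  b->fd = p; f->bk = p;
  p->fd = f; p->bk = b;
}

static struct ex_node *ex_pop_front (struct ex_node *b) {
  struct ex_node *p = b->fd;
  if (p == b) return 0;         /* bin empty */
  b->fd = p->fd; p->fd->bk = b;
  return p;
}
#endif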
2168/* Link a free chunk into a smallbin */
2169#define insert_small_chunk(M, P, S) {\
2170 bindex_t I = small_index(S);\
2171 mchunkptr B = smallbin_at(M, I);\
2172 mchunkptr F = B;\
2173 assert(S >= MIN_CHUNK_SIZE);\
2174 if (!smallmap_is_marked(M, I))\
2175 mark_smallmap(M, I);\
2176 else if (RTCHECK(ok_address(M, B->fd)))\
2177 F = B->fd;\
2178 else {\
2179 CORRUPTION_ERROR_ACTION(M);\
2180 }\
2181 B->fd = P;\
2182 F->bk = P;\
2183 P->fd = F;\
2184 P->bk = B;\
2185}
2186
2187/* Unlink a chunk from a smallbin */
2188#define unlink_small_chunk(M, P, S) {\
2189 mchunkptr F = P->fd;\
2190 mchunkptr B = P->bk;\
2191 bindex_t I = small_index(S);\
2192 assert(P != B);\
2193 assert(P != F);\
2194 assert(chunksize(P) == small_index2size(I));\
2195 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2196 if (B == F) {\
2197 clear_smallmap(M, I);\
2198 }\
2199 else if (RTCHECK(B == smallbin_at(M,I) ||\
2200 (ok_address(M, B) && B->fd == P))) {\
2201 F->bk = B;\
2202 B->fd = F;\
2203 }\
2204 else {\
2205 CORRUPTION_ERROR_ACTION(M);\
2206 }\
2207 }\
2208 else {\
2209 CORRUPTION_ERROR_ACTION(M);\
2210 }\
2211}
2212
2213/* Unlink the first chunk from a smallbin */
2214#define unlink_first_small_chunk(M, B, P, I) {\
2215 mchunkptr F = P->fd;\
2216 assert(P != B);\
2217 assert(P != F);\
2218 assert(chunksize(P) == small_index2size(I));\
2219 if (B == F) {\
2220 clear_smallmap(M, I);\
2221 }\
2222 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2223 F->bk = B;\
2224 B->fd = F;\
2225 }\
2226 else {\
2227 CORRUPTION_ERROR_ACTION(M);\
2228 }\
2229}
2230
2231/* Replace dv node, binning the old one */
2232/* Used only when dvsize known to be small */
2233#define replace_dv(M, P, S) {\
2234 size_t DVS = M->dvsize;\
2235 assert(is_small(DVS));\
2236 if (DVS != 0) {\
2237 mchunkptr DV = M->dv;\
2238 insert_small_chunk(M, DV, DVS);\
2239 }\
2240 M->dvsize = S;\
2241 M->dv = P;\
2242}
2243
2244/* ------------------------- Operations on trees ------------------------- */
2245
2246/* Insert chunk into tree */
2247#define insert_large_chunk(M, X, S) {\
2248 tbinptr* H;\
2249 bindex_t I;\
2250 compute_tree_index(S, I);\
2251 H = treebin_at(M, I);\
2252 X->index = I;\
2253 X->child[0] = X->child[1] = 0;\
2254 if (!treemap_is_marked(M, I)) {\
2255 mark_treemap(M, I);\
2256 *H = X;\
2257 X->parent = (tchunkptr)H;\
2258 X->fd = X->bk = X;\
2259 }\
2260 else {\
2261 tchunkptr T = *H;\
2262 size_t K = S << leftshift_for_tree_index(I);\
2263 for (;;) {\
2264 if (chunksize(T) != S) {\
2265 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2266 K <<= 1;\
2267 if (*C != 0)\
2268 T = *C;\
2269 else if (RTCHECK(ok_address(M, C))) {\
2270 *C = X;\
2271 X->parent = T;\
2272 X->fd = X->bk = X;\
2273 break;\
2274 }\
2275 else {\
2276 CORRUPTION_ERROR_ACTION(M);\
2277 break;\
2278 }\
2279 }\
2280 else {\
2281 tchunkptr F = T->fd;\
2282 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2283 T->fd = F->bk = X;\
2284 X->fd = F;\
2285 X->bk = T;\
2286 X->parent = 0;\
2287 break;\
2288 }\
2289 else {\
2290 CORRUPTION_ERROR_ACTION(M);\
2291 break;\
2292 }\
2293 }\
2294 }\
2295 }\
2296}
2297
2298/*
2299 Unlink steps:
2300
2301 1. If x is a chained node, unlink it from its same-sized fd/bk links
2302 and choose its bk node as its replacement.
2303 2. If x was the last node of its size, but not a leaf node, it must
2304 be replaced with a leaf node (not merely one with an open left or
2305      right), to make sure that lefts and rights of descendants
2306      correspond properly to bit masks. We use the rightmost descendant
2307 of x. We could use any other leaf, but this is easy to locate and
2308 tends to counteract removal of leftmosts elsewhere, and so keeps
2309 paths shorter than minimally guaranteed. This doesn't loop much
2310 because on average a node in a tree is near the bottom.
2311 3. If x is the base of a chain (i.e., has parent links) relink
2312 x's parent and children to x's replacement (or null if none).
2313*/
2314
2315#define unlink_large_chunk(M, X) {\
2316 tchunkptr XP = X->parent;\
2317 tchunkptr R;\
2318 if (X->bk != X) {\
2319 tchunkptr F = X->fd;\
2320 R = X->bk;\
2321 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2322 F->bk = R;\
2323 R->fd = F;\
2324 }\
2325 else {\
2326 CORRUPTION_ERROR_ACTION(M);\
2327 }\
2328 }\
2329 else {\
2330 tchunkptr* RP;\
2331 if (((R = *(RP = &(X->child[1]))) != 0) ||\
2332 ((R = *(RP = &(X->child[0]))) != 0)) {\
2333 tchunkptr* CP;\
2334 while ((*(CP = &(R->child[1])) != 0) ||\
2335 (*(CP = &(R->child[0])) != 0)) {\
2336 R = *(RP = CP);\
2337 }\
2338 if (RTCHECK(ok_address(M, RP)))\
2339 *RP = 0;\
2340 else {\
2341 CORRUPTION_ERROR_ACTION(M);\
2342 }\
2343 }\
2344 }\
2345 if (XP != 0) {\
2346 tbinptr* H = treebin_at(M, X->index);\
2347 if (X == *H) {\
2348 if ((*H = R) == 0) \
2349 clear_treemap(M, X->index);\
2350 }\
2351 else if (RTCHECK(ok_address(M, XP))) {\
2352 if (XP->child[0] == X) \
2353 XP->child[0] = R;\
2354 else \
2355 XP->child[1] = R;\
2356 }\
2357 else\
2358 CORRUPTION_ERROR_ACTION(M);\
2359 if (R != 0) {\
2360 if (RTCHECK(ok_address(M, R))) {\
2361 tchunkptr C0, C1;\
2362 R->parent = XP;\
2363 if ((C0 = X->child[0]) != 0) {\
2364 if (RTCHECK(ok_address(M, C0))) {\
2365 R->child[0] = C0;\
2366 C0->parent = R;\
2367 }\
2368 else\
2369 CORRUPTION_ERROR_ACTION(M);\
2370 }\
2371 if ((C1 = X->child[1]) != 0) {\
2372 if (RTCHECK(ok_address(M, C1))) {\
2373 R->child[1] = C1;\
2374 C1->parent = R;\
2375 }\
2376 else\
2377 CORRUPTION_ERROR_ACTION(M);\
2378 }\
2379 }\
2380 else\
2381 CORRUPTION_ERROR_ACTION(M);\
2382 }\
2383 }\
2384}
2385
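/*
  Compiled-out sketch of step 2 of the unlink procedure described above:
  when the removed node was the last chunk of its size and is not a leaf,
  its replacement is its rightmost descendant, found by repeatedly
  preferring child[1] over child[0].  struct ex_tnode and
  ex_rightmost_descendant() are hypothetical stand-ins for tchunk and the
  corresponding loop inside unlink_large_chunk().
*/
#if 0
struct ex_tnode { struct ex_tnode *child[2]; };

static struct ex_tnode *ex_rightmost_descendant (struct ex_tnode *x) {
  struct ex_tnode *r = (x->child[1] != 0) ? x->child[1] : x->child[0];
  if (r == 0)
    return 0;                             /* x is already a leaf */
  while (r->child[1] != 0 || r->child[0] != 0)
    r = (r->child[1] != 0) ? r->child[1] : r->child[0];
  return r;
}
#endif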
2386/* Relays to large vs small bin operations */
2387
2388#define insert_chunk(M, P, S)\
2389 if (is_small(S)) insert_small_chunk(M, P, S)\
2390 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2391
2392#define unlink_chunk(M, P, S)\
2393 if (is_small(S)) unlink_small_chunk(M, P, S)\
2394 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2395
2396
2397/* Relays to internal calls to malloc/free from realloc, memalign etc */
2398
2399#if ONLY_MSPACES
2400#define internal_malloc(m, b) mspace_malloc(m, b)
2401#define internal_free(m, mem) mspace_free(m,mem);
2402#else /* ONLY_MSPACES */
2403#if MSPACES
2404#define internal_malloc(m, b)\
2405 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2406#define internal_free(m, mem)\
2407 if (m == gm) dlfree(mem); else mspace_free(m,mem);
2408#else /* MSPACES */
2409#define internal_malloc(m, b) dlmalloc(b)
2410#define internal_free(m, mem) dlfree(mem)
2411#endif /* MSPACES */
2412#endif /* ONLY_MSPACES */
2413
2414/* ----------------------- Direct-mmapping chunks ----------------------- */
2415
2416/*
2417 Directly mmapped chunks are set up with an offset to the start of
2418 the mmapped region stored in the prev_foot field of the chunk. This
2419 allows reconstruction of the required argument to MUNMAP when freed,
2420 and also allows adjustment of the returned chunk to meet alignment
2421 requirements (especially in memalign).
2422*/
2423
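/*
  Compiled-out illustration of the bookkeeping described above: for a
  directly mmapped chunk p, prev_foot holds the distance from the start of
  the mapping back to the chunk, so the original MUNMAP arguments can be
  reconstructed when the chunk is freed.  ex_munmap_args() is a
  hypothetical helper that restates the arithmetic used for mmapped chunks
  in dlfree() and dispose_chunk().
*/
#if 0
static void ex_munmap_args (mchunkptr p, char **base, size_t *len) {
  size_t offset = p->prev_foot;                   /* distance back to the mmap base */
  *base = (char *) p - offset;                    /* start of the mapping           */
  *len = chunksize (p) + offset + MMAP_FOOT_PAD;  /* full mapped length             */
}
#endif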
2424/* Malloc using mmap */
2425static void* mmap_alloc(mstate m, size_t nb) {
2426 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2427 if (m->footprint_limit != 0) {
2428 size_t fp = m->footprint + mmsize;
2429 if (fp <= m->footprint || fp > m->footprint_limit)
2430 return 0;
2431 }
2432 if (mmsize > nb) { /* Check for wrap around 0 */
2433 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2434 if (mm != CMFAIL) {
2435 size_t offset = align_offset(chunk2mem(mm));
2436 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2437 mchunkptr p = (mchunkptr)(mm + offset);
2438 p->prev_foot = offset;
2439 p->head = psize;
2440 mark_inuse_foot(m, p, psize);
2441 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2442 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2443
2444 if (m->least_addr == 0 || mm < m->least_addr)
2445 m->least_addr = mm;
2446 if ((m->footprint += mmsize) > m->max_footprint)
2447 m->max_footprint = m->footprint;
2448 assert(is_aligned(chunk2mem(p)));
2449 check_mmapped_chunk(m, p);
2450 return chunk2mem(p);
2451 }
2452 }
2453 return 0;
2454}
2455
2456/* Realloc using mmap */
2457static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2458 size_t oldsize = chunksize(oldp);
2459 (void)flags; /* placate people compiling -Wunused */
2460 if (is_small(nb)) /* Can't shrink mmap regions below small size */
2461 return 0;
2462 /* Keep old chunk if big enough but not too big */
2463 if (oldsize >= nb + SIZE_T_SIZE &&
2464 (oldsize - nb) <= (mparams.granularity << 1))
2465 return oldp;
2466 else {
2467 size_t offset = oldp->prev_foot;
2468 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2469 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2470 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2471 oldmmsize, newmmsize, flags);
2472 if (cp != CMFAIL) {
2473 mchunkptr newp = (mchunkptr)(cp + offset);
2474 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2475 newp->head = psize;
2476 mark_inuse_foot(m, newp, psize);
2477 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2478 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2479
2480 if (cp < m->least_addr)
2481 m->least_addr = cp;
2482 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2483 m->max_footprint = m->footprint;
2484 check_mmapped_chunk(m, newp);
2485 return newp;
2486 }
2487 }
2488 return 0;
2489}
2490
2491
2492/* -------------------------- mspace management -------------------------- */
2493
2494/* Initialize top chunk and its size */
2495__clib_nosanitize_addr
2496static void init_top(mstate m, mchunkptr p, size_t psize) {
2497 /* Ensure alignment */
2498 size_t offset = align_offset(chunk2mem(p));
2499 p = (mchunkptr)((char*)p + offset);
2500 psize -= offset;
2501
2502 m->top = p;
2503 m->topsize = psize;
2504 p->head = psize | PINUSE_BIT;
2505 /* set size of fake trailing chunk holding overhead space only once */
2506 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2507 m->trim_check = mparams.trim_threshold; /* reset on each update */
2508}
2509
2510/* Initialize bins for a new mstate that is otherwise zeroed out */
2511static void init_bins(mstate m) {
2512 /* Establish circular links for smallbins */
2513 bindex_t i;
2514 for (i = 0; i < NSMALLBINS; ++i) {
2515 sbinptr bin = smallbin_at(m,i);
2516 bin->fd = bin->bk = bin;
2517 }
2518}
2519
2520#if PROCEED_ON_ERROR
2521
2522/* default corruption action */
2523static void reset_on_error(mstate m) {
2524 int i;
2525 ++malloc_corruption_error_count;
2526 /* Reinitialize fields to forget about all memory */
2527 m->smallmap = m->treemap = 0;
2528 m->dvsize = m->topsize = 0;
2529 m->seg.base = 0;
2530 m->seg.size = 0;
2531 m->seg.next = 0;
2532 m->top = m->dv = 0;
2533 for (i = 0; i < NTREEBINS; ++i)
2534 *treebin_at(m, i) = 0;
2535 init_bins(m);
2536}
2537#endif /* PROCEED_ON_ERROR */
2538
2539/* Allocate chunk and prepend remainder with chunk in successor base. */
2540__clib_nosanitize_addr
2541static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2542 size_t nb) {
2543 mchunkptr p = align_as_chunk(newbase);
2544 mchunkptr oldfirst = align_as_chunk(oldbase);
2545 size_t psize = (char*)oldfirst - (char*)p;
2546 mchunkptr q = chunk_plus_offset(p, nb);
2547 size_t qsize = psize - nb;
2548 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2549
2550 assert((char*)oldfirst > (char*)q);
2551 assert(pinuse(oldfirst));
2552 assert(qsize >= MIN_CHUNK_SIZE);
2553
2554 /* consolidate remainder with first chunk of old base */
2555 if (oldfirst == m->top) {
2556 size_t tsize = m->topsize += qsize;
2557 m->top = q;
2558 q->head = tsize | PINUSE_BIT;
2559 check_top_chunk(m, q);
2560 }
2561 else if (oldfirst == m->dv) {
2562 size_t dsize = m->dvsize += qsize;
2563 m->dv = q;
2564 set_size_and_pinuse_of_free_chunk(q, dsize);
2565 }
2566 else {
2567 if (!is_inuse(oldfirst)) {
2568 size_t nsize = chunksize(oldfirst);
2569 unlink_chunk(m, oldfirst, nsize);
2570 oldfirst = chunk_plus_offset(oldfirst, nsize);
2571 qsize += nsize;
2572 }
2573 set_free_with_pinuse(q, qsize, oldfirst);
2574 insert_chunk(m, q, qsize);
2575 check_free_chunk(m, q);
2576 }
2577
2578 check_malloced_chunk(m, chunk2mem(p), nb);
2579 return chunk2mem(p);
2580}
2581
2582/* Add a segment to hold a new noncontiguous region */
2583__clib_nosanitize_addr
2584static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2585 /* Determine locations and sizes of segment, fenceposts, old top */
2586 char* old_top = (char*)m->top;
2587 msegmentptr oldsp = segment_holding(m, old_top);
2588 char* old_end = oldsp->base + oldsp->size;
2589 size_t ssize = pad_request(sizeof(struct malloc_segment));
2590 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2591 size_t offset = align_offset(chunk2mem(rawsp));
2592 char* asp = rawsp + offset;
2593 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2594 mchunkptr sp = (mchunkptr)csp;
2595 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2596 mchunkptr tnext = chunk_plus_offset(sp, ssize);
2597 mchunkptr p = tnext;
2598 int nfences = 0;
2599
2600 /* reset top to new space */
2601 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2602
2603 /* Set up segment record */
2604 assert(is_aligned(ss));
2605 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2606 *ss = m->seg; /* Push current record */
2607 m->seg.base = tbase;
2608 m->seg.size = tsize;
2609 m->seg.sflags = mmapped;
2610 m->seg.next = ss;
2611
2612 /* Insert trailing fenceposts */
2613 for (;;) {
2614 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2615 p->head = FENCEPOST_HEAD;
2616 ++nfences;
2617 if ((char*)(&(nextp->head)) < old_end)
2618 p = nextp;
2619 else
2620 break;
2621 }
2622 assert(nfences >= 2);
2623
2624 /* Insert the rest of old top into a bin as an ordinary free chunk */
2625 if (csp != old_top) {
2626 mchunkptr q = (mchunkptr)old_top;
2627 size_t psize = csp - old_top;
2628 mchunkptr tn = chunk_plus_offset(q, psize);
2629 set_free_with_pinuse(q, psize, tn);
2630 insert_chunk(m, q, psize);
2631 }
2632
2633 check_top_chunk(m, m->top);
2634}
2635
2636/* -------------------------- System allocation -------------------------- */
2637
2638/* Get memory from system using MORECORE or MMAP */
2639__clib_nosanitize_addr
2640static void* sys_alloc(mstate m, size_t nb) {
2641 char* tbase = CMFAIL;
2642 size_t tsize = 0;
2643 flag_t mmap_flag = 0;
2644 size_t asize; /* allocation size */
2645
2646 ensure_initialization();
2647
2648 if (use_noexpand(m))
2649 return 0;
2650
2651 /* Directly map large chunks, but only if already initialized */
2652 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2653 void* mem = mmap_alloc(m, nb);
2654 if (mem != 0)
2655 return mem;
2656 }
2657
2658 asize = granularity_align(nb + SYS_ALLOC_PADDING);
2659 if (asize <= nb)
2660 return 0; /* wraparound */
2661 if (m->footprint_limit != 0) {
2662 size_t fp = m->footprint + asize;
2663 if (fp <= m->footprint || fp > m->footprint_limit)
2664 return 0;
2665 }
2666
2667 /*
2668 Try getting memory in any of three ways (in most-preferred to
2669 least-preferred order):
2670 1. A call to MORECORE that can normally contiguously extend memory.
2671 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
2672       main space is mmapped or a previous contiguous call failed)
2673 2. A call to MMAP new space (disabled if not HAVE_MMAP).
2674 Note that under the default settings, if MORECORE is unable to
2675 fulfill a request, and HAVE_MMAP is true, then mmap is
2676 used as a noncontiguous system allocator. This is a useful backup
2677 strategy for systems with holes in address spaces -- in this case
2678 sbrk cannot contiguously expand the heap, but mmap may be able to
2679 find space.
2680 3. A call to MORECORE that cannot usually contiguously extend memory.
2681 (disabled if not HAVE_MORECORE)
2682
2683 In all cases, we need to request enough bytes from system to ensure
2684 we can malloc nb bytes upon success, so pad with enough space for
2685 top_foot, plus alignment-pad to make sure we don't lose bytes if
2686 not on boundary, and round this up to a granularity unit.
2687 */
2688
2689 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
2690 char* br = CMFAIL;
2691 size_t ssize = asize; /* sbrk call size */
2692 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2693 ACQUIRE_MALLOC_GLOBAL_LOCK();
2694
2695 if (ss == 0) { /* First time through or recovery */
2696 char* base = (char*)CALL_MORECORE(0);
2697 if (base != CMFAIL) {
2698 size_t fp;
2699 /* Adjust to end on a page boundary */
2700 if (!is_page_aligned(base))
2701 ssize += (page_align((size_t)base) - (size_t)base);
2702 fp = m->footprint + ssize; /* recheck limits */
2703 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2704 (m->footprint_limit == 0 ||
2705 (fp > m->footprint && fp <= m->footprint_limit)) &&
2706 (br = (char*)(CALL_MORECORE(ssize))) == base) {
2707 tbase = base;
2708 tsize = ssize;
2709 }
2710 }
2711 }
2712 else {
2713 /* Subtract out existing available top space from MORECORE request. */
2714 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2715      /* Use mem here only if it did contiguously extend old space */
2716 if (ssize < HALF_MAX_SIZE_T &&
2717 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2718 tbase = br;
2719 tsize = ssize;
2720 }
2721 }
2722
2723 if (tbase == CMFAIL) { /* Cope with partial failure */
2724 if (br != CMFAIL) { /* Try to use/extend the space we did get */
2725 if (ssize < HALF_MAX_SIZE_T &&
2726 ssize < nb + SYS_ALLOC_PADDING) {
2727 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2728 if (esize < HALF_MAX_SIZE_T) {
2729 char* end = (char*)CALL_MORECORE(esize);
2730 if (end != CMFAIL)
2731 ssize += esize;
2732 else { /* Can't use; try to release */
2733 (void) CALL_MORECORE(-ssize);
2734 br = CMFAIL;
2735 }
2736 }
2737 }
2738 }
2739 if (br != CMFAIL) { /* Use the space we did get */
2740 tbase = br;
2741 tsize = ssize;
2742 }
2743 else
2744 disable_contiguous(m); /* Don't try contiguous path in the future */
2745 }
2746
2747 RELEASE_MALLOC_GLOBAL_LOCK();
2748 }
2749
2750 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2751 char* mp = (char*)(CALL_MMAP(asize));
2752 if (mp != CMFAIL) {
2753 tbase = mp;
2754 tsize = asize;
2755 mmap_flag = USE_MMAP_BIT;
2756 }
2757 }
2758
2759 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2760 if (asize < HALF_MAX_SIZE_T) {
2761 char* br = CMFAIL;
2762 char* end = CMFAIL;
2763 ACQUIRE_MALLOC_GLOBAL_LOCK();
2764 br = (char*)(CALL_MORECORE(asize));
2765 end = (char*)(CALL_MORECORE(0));
2766 RELEASE_MALLOC_GLOBAL_LOCK();
2767 if (br != CMFAIL && end != CMFAIL && br < end) {
2768 size_t ssize = end - br;
2769 if (ssize > nb + TOP_FOOT_SIZE) {
2770 tbase = br;
2771 tsize = ssize;
2772 }
2773 }
2774 }
2775 }
2776
2777 if (tbase != CMFAIL) {
2778
2779 if ((m->footprint += tsize) > m->max_footprint)
2780 m->max_footprint = m->footprint;
2781
2782 if (!is_initialized(m)) { /* first-time initialization */
2783 if (m->least_addr == 0 || tbase < m->least_addr)
2784 m->least_addr = tbase;
2785 m->seg.base = tbase;
2786 m->seg.size = tsize;
2787 m->seg.sflags = mmap_flag;
2788 m->magic = mparams.magic;
2789 m->release_checks = MAX_RELEASE_CHECK_RATE;
2790 init_bins(m);
2791#if !ONLY_MSPACES
2792 if (is_global(m))
2793 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2794 else
2795#endif
2796 {
2797 /* Offset top by embedded malloc_state */
2798 mchunkptr mn = next_chunk(mem2chunk(m));
2799 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2800 }
2801 }
2802
2803 else {
2804 /* Try to merge with an existing segment */
2805 msegmentptr sp = &m->seg;
2806 /* Only consider most recent segment if traversal suppressed */
2807 while (sp != 0 && tbase != sp->base + sp->size)
2808 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2809 if (sp != 0 &&
2810 !is_extern_segment(sp) &&
2811 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2812 segment_holds(sp, m->top)) { /* append */
2813 sp->size += tsize;
2814 init_top(m, m->top, m->topsize + tsize);
2815 }
2816 else {
2817 if (tbase < m->least_addr)
2818 m->least_addr = tbase;
2819 sp = &m->seg;
2820 while (sp != 0 && sp->base != tbase + tsize)
2821 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2822 if (sp != 0 &&
2823 !is_extern_segment(sp) &&
2824 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2825 char* oldbase = sp->base;
2826 sp->base = tbase;
2827 sp->size += tsize;
2828 return prepend_alloc(m, tbase, oldbase, nb);
2829 }
2830 else
2831 add_segment(m, tbase, tsize, mmap_flag);
2832 }
2833 }
2834
2835 if (nb < m->topsize) { /* Allocate from new or extended top space */
2836 size_t rsize = m->topsize -= nb;
2837 mchunkptr p = m->top;
2838 mchunkptr r = m->top = chunk_plus_offset(p, nb);
2839 r->head = rsize | PINUSE_BIT;
2840 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2841 check_top_chunk(m, m->top);
2842 check_malloced_chunk(m, chunk2mem(p), nb);
2843 return chunk2mem(p);
2844 }
2845 }
2846
2847 MALLOC_FAILURE_ACTION;
2848 return 0;
2849}
2850
2851/* ----------------------- system deallocation -------------------------- */
2852
2853/* Unmap and unlink any mmapped segments that don't contain used chunks */
2854__clib_nosanitize_addr
2855static size_t release_unused_segments(mstate m) {
2856 size_t released = 0;
2857 int nsegs = 0;
2858 msegmentptr pred = &m->seg;
2859 msegmentptr sp = pred->next;
2860 while (sp != 0) {
2861 char* base = sp->base;
2862 size_t size = sp->size;
2863 msegmentptr next = sp->next;
2864 ++nsegs;
2865 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2866 mchunkptr p = align_as_chunk(base);
2867 size_t psize = chunksize(p);
2868 /* Can unmap if first chunk holds entire segment and not pinned */
2869 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2870 tchunkptr tp = (tchunkptr)p;
2871 assert(segment_holds(sp, (char*)sp));
2872 if (p == m->dv) {
2873 m->dv = 0;
2874 m->dvsize = 0;
2875 }
2876 else {
2877 unlink_large_chunk(m, tp);
2878 }
2879 if (CALL_MUNMAP(base, size) == 0) {
2880 released += size;
2881 m->footprint -= size;
2882 /* unlink obsoleted record */
2883 sp = pred;
2884 sp->next = next;
2885 }
2886 else { /* back out if cannot unmap */
2887 insert_large_chunk(m, tp, psize);
2888 }
2889 }
2890 }
2891 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2892 break;
2893 pred = sp;
2894 sp = next;
2895 }
2896 /* Reset check counter */
2897 m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2898 (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2899 return released;
2900}
2901
2902__clib_nosanitize_addr
2903static int sys_trim(mstate m, size_t pad) {
2904 size_t released = 0;
2905 ensure_initialization();
2906 if (pad < MAX_REQUEST && is_initialized(m)) {
2907 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2908
2909 if (m->topsize > pad) {
2910 /* Shrink top space in granularity-size units, keeping at least one */
2911 size_t unit = mparams.granularity;
2912 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2913 SIZE_T_ONE) * unit;
2914 msegmentptr sp = segment_holding(m, (char*)m->top);
2915
2916 if (!is_extern_segment(sp)) {
2917 if (is_mmapped_segment(sp)) {
2918 if (HAVE_MMAP &&
2919 sp->size >= extra &&
2920 !has_segment_link(m, sp)) { /* can't shrink if pinned */
2921 size_t newsize = sp->size - extra;
2922 (void)newsize; /* placate people compiling -Wunused-variable */
2923 /* Prefer mremap, fall back to munmap */
2924 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2925 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2926 released = extra;
2927 }
2928 }
2929 }
2930 else if (HAVE_MORECORE) {
2931 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2932 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2933 ACQUIRE_MALLOC_GLOBAL_LOCK();
2934 {
2935 /* Make sure end of memory is where we last set it. */
2936 char* old_br = (char*)(CALL_MORECORE(0));
2937 if (old_br == sp->base + sp->size) {
2938 char* rel_br = (char*)(CALL_MORECORE(-extra));
2939 char* new_br = (char*)(CALL_MORECORE(0));
2940 if (rel_br != CMFAIL && new_br < old_br)
2941 released = old_br - new_br;
2942 }
2943 }
2944 RELEASE_MALLOC_GLOBAL_LOCK();
2945 }
2946 }
2947
2948 if (released != 0) {
2949 sp->size -= released;
2950 m->footprint -= released;
2951 init_top(m, m->top, m->topsize - released);
2952 check_top_chunk(m, m->top);
2953 }
2954 }
2955
2956 /* Unmap any unused mmapped segments */
2957 if (HAVE_MMAP)
2958 released += release_unused_segments(m);
2959
2960 /* On failure, disable autotrim to avoid repeated failed future calls */
2961 if (released == 0 && m->topsize > m->trim_check)
2962 m->trim_check = MAX_SIZE_T;
2963 }
2964
2965 return (released != 0)? 1 : 0;
2966}
2967
2968/* Consolidate and bin a chunk. Differs from exported versions
2969 of free mainly in that the chunk need not be marked as inuse.
2970*/
2971__clib_nosanitize_addr
2972static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2973 mchunkptr next = chunk_plus_offset(p, psize);
2974 if (!pinuse(p)) {
2975 mchunkptr prev;
2976 size_t prevsize = p->prev_foot;
2977 if (is_mmapped(p)) {
2978 psize += prevsize + MMAP_FOOT_PAD;
2979 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2980 m->footprint -= psize;
2981 return;
2982 }
2983 prev = chunk_minus_offset(p, prevsize);
2984 psize += prevsize;
2985 p = prev;
2986 if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2987 if (p != m->dv) {
2988 unlink_chunk(m, p, prevsize);
2989 }
2990 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2991 m->dvsize = psize;
2992 set_free_with_pinuse(p, psize, next);
2993 return;
2994 }
2995 }
2996 else {
2997 CORRUPTION_ERROR_ACTION(m);
2998 return;
2999 }
3000 }
3001 if (RTCHECK(ok_address(m, next))) {
3002 if (!cinuse(next)) { /* consolidate forward */
3003 if (next == m->top) {
3004 size_t tsize = m->topsize += psize;
3005 m->top = p;
3006 p->head = tsize | PINUSE_BIT;
3007 if (p == m->dv) {
3008 m->dv = 0;
3009 m->dvsize = 0;
3010 }
3011 return;
3012 }
3013 else if (next == m->dv) {
3014 size_t dsize = m->dvsize += psize;
3015 m->dv = p;
3016 set_size_and_pinuse_of_free_chunk(p, dsize);
3017 return;
3018 }
3019 else {
3020 size_t nsize = chunksize(next);
3021 psize += nsize;
3022 unlink_chunk(m, next, nsize);
3023 set_size_and_pinuse_of_free_chunk(p, psize);
3024 if (p == m->dv) {
3025 m->dvsize = psize;
3026 return;
3027 }
3028 }
3029 }
3030 else {
3031 set_free_with_pinuse(p, psize, next);
3032 }
3033 insert_chunk(m, p, psize);
3034 }
3035 else {
3036 CORRUPTION_ERROR_ACTION(m);
3037 }
3038}
3039
3040/* ---------------------------- malloc --------------------------- */
3041
3042/* allocate a large request from the best fitting chunk in a treebin */
3043__clib_nosanitize_addr
3044static void* tmalloc_large(mstate m, size_t nb) {
3045 tchunkptr v = 0;
3046 size_t rsize = -nb; /* Unsigned negation */
3047 tchunkptr t;
3048 bindex_t idx;
3049 compute_tree_index(nb, idx);
3050 if ((t = *treebin_at(m, idx)) != 0) {
3051 /* Traverse tree for this bin looking for node with size == nb */
3052 size_t sizebits = nb << leftshift_for_tree_index(idx);
3053 tchunkptr rst = 0; /* The deepest untaken right subtree */
3054 for (;;) {
3055 tchunkptr rt;
3056 size_t trem = chunksize(t) - nb;
3057 if (trem < rsize) {
3058 v = t;
3059 if ((rsize = trem) == 0)
3060 break;
3061 }
3062 rt = t->child[1];
3063 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3064 if (rt != 0 && rt != t)
3065 rst = rt;
3066 if (t == 0) {
3067 t = rst; /* set t to least subtree holding sizes > nb */
3068 break;
3069 }
3070 sizebits <<= 1;
3071 }
3072 }
3073 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3074 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3075 if (leftbits != 0) {
3076 bindex_t i;
3077 binmap_t leastbit = least_bit(leftbits);
3078 compute_bit2idx(leastbit, i);
3079 t = *treebin_at(m, i);
3080 }
3081 }
3082
3083 while (t != 0) { /* find smallest of tree or subtree */
3084 size_t trem = chunksize(t) - nb;
3085 if (trem < rsize) {
3086 rsize = trem;
3087 v = t;
3088 }
3089 t = leftmost_child(t);
3090 }
3091
3092 /* If dv is a better fit, return 0 so malloc will use it */
3093 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3094 if (RTCHECK(ok_address(m, v))) { /* split */
3095 mchunkptr r = chunk_plus_offset(v, nb);
3096 assert(chunksize(v) == rsize + nb);
3097 if (RTCHECK(ok_next(v, r))) {
3098 unlink_large_chunk(m, v);
3099 if (rsize < MIN_CHUNK_SIZE)
3100 set_inuse_and_pinuse(m, v, (rsize + nb));
3101 else {
3102 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3103 set_size_and_pinuse_of_free_chunk(r, rsize);
3104 insert_chunk(m, r, rsize);
3105 }
3106 return chunk2mem(v);
3107 }
3108 }
3109 CORRUPTION_ERROR_ACTION(m);
3110 }
3111 return 0;
3112}
3113
3114/* allocate a small request from the best fitting chunk in a treebin */
3115__clib_nosanitize_addr
3116static void* tmalloc_small(mstate m, size_t nb) {
3117 tchunkptr t, v;
3118 size_t rsize;
3119 bindex_t i;
3120 binmap_t leastbit = least_bit(m->treemap);
3121 compute_bit2idx(leastbit, i);
3122 v = t = *treebin_at(m, i);
3123 rsize = chunksize(t) - nb;
3124
3125 while ((t = leftmost_child(t)) != 0) {
3126 size_t trem = chunksize(t) - nb;
3127 if (trem < rsize) {
3128 rsize = trem;
3129 v = t;
3130 }
3131 }
3132
3133 if (RTCHECK(ok_address(m, v))) {
3134 mchunkptr r = chunk_plus_offset(v, nb);
3135 assert(chunksize(v) == rsize + nb);
3136 if (RTCHECK(ok_next(v, r))) {
3137 unlink_large_chunk(m, v);
3138 if (rsize < MIN_CHUNK_SIZE)
3139 set_inuse_and_pinuse(m, v, (rsize + nb));
3140 else {
3141 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3142 set_size_and_pinuse_of_free_chunk(r, rsize);
3143 replace_dv(m, r, rsize);
3144 }
3145 return chunk2mem(v);
3146 }
3147 }
3148
3149 CORRUPTION_ERROR_ACTION(m);
3150 return 0;
3151}
3152
3153#if !ONLY_MSPACES
3154
3155void* dlmalloc(size_t bytes) {
3156 /*
3157 Basic algorithm:
3158 If a small request (< 256 bytes minus per-chunk overhead):
3159 1. If one exists, use a remainderless chunk in associated smallbin.
3160 (Remainderless means that there are too few excess bytes to
3161 represent as a chunk.)
3162 2. If it is big enough, use the dv chunk, which is normally the
3163 chunk adjacent to the one used for the most recent small request.
3164 3. If one exists, split the smallest available chunk in a bin,
3165 saving remainder in dv.
3166 4. If it is big enough, use the top chunk.
3167 5. If available, get memory from system and use it
3168 Otherwise, for a large request:
3169 1. Find the smallest available binned chunk that fits, and use it
3170 if it is better fitting than dv chunk, splitting if necessary.
3171 2. If better fitting than any binned chunk, use the dv chunk.
3172 3. If it is big enough, use the top chunk.
3173 4. If request size >= mmap threshold, try to directly mmap this chunk.
3174 5. If available, get memory from system and use it
3175
3176 The ugly goto's here ensure that postaction occurs along all paths.
3177 */
3178
3179#if USE_LOCKS
3180 ensure_initialization(); /* initialize in sys_alloc if not using locks */
3181#endif
3182
3183 if (!PREACTION(gm)) {
3184 void* mem;
3185 size_t nb;
3186 if (bytes <= MAX_SMALL_REQUEST) {
3187 bindex_t idx;
3188 binmap_t smallbits;
3189 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3190 idx = small_index(nb);
3191 smallbits = gm->smallmap >> idx;
3192
3193 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3194 mchunkptr b, p;
3195 idx += ~smallbits & 1; /* Uses next bin if idx empty */
3196 b = smallbin_at(gm, idx);
3197 p = b->fd;
3198 assert(chunksize(p) == small_index2size(idx));
3199 unlink_first_small_chunk(gm, b, p, idx);
3200 set_inuse_and_pinuse(gm, p, small_index2size(idx));
3201 mem = chunk2mem(p);
3202 check_malloced_chunk(gm, mem, nb);
3203 goto postaction;
3204 }
3205
3206 else if (nb > gm->dvsize) {
3207 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3208 mchunkptr b, p, r;
3209 size_t rsize;
3210 bindex_t i;
3211 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3212 binmap_t leastbit = least_bit(leftbits);
3213 compute_bit2idx(leastbit, i);
3214 b = smallbin_at(gm, i);
3215 p = b->fd;
3216 assert(chunksize(p) == small_index2size(i));
3217 unlink_first_small_chunk(gm, b, p, i);
3218 rsize = small_index2size(i) - nb;
3219 /* Fit here cannot be remainderless if 4byte sizes */
3220 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3221 set_inuse_and_pinuse(gm, p, small_index2size(i));
3222 else {
3223 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3224 r = chunk_plus_offset(p, nb);
3225 set_size_and_pinuse_of_free_chunk(r, rsize);
3226 replace_dv(gm, r, rsize);
3227 }
3228 mem = chunk2mem(p);
3229 check_malloced_chunk(gm, mem, nb);
3230 goto postaction;
3231 }
3232
3233 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3234 check_malloced_chunk(gm, mem, nb);
3235 goto postaction;
3236 }
3237 }
3238 }
3239 else if (bytes >= MAX_REQUEST)
3240 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3241 else {
3242 nb = pad_request(bytes);
3243 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3244 check_malloced_chunk(gm, mem, nb);
3245 goto postaction;
3246 }
3247 }
3248
3249 if (nb <= gm->dvsize) {
3250 size_t rsize = gm->dvsize - nb;
3251 mchunkptr p = gm->dv;
3252 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3253 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3254 gm->dvsize = rsize;
3255 set_size_and_pinuse_of_free_chunk(r, rsize);
3256 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3257 }
3258 else { /* exhaust dv */
3259 size_t dvs = gm->dvsize;
3260 gm->dvsize = 0;
3261 gm->dv = 0;
3262 set_inuse_and_pinuse(gm, p, dvs);
3263 }
3264 mem = chunk2mem(p);
3265 check_malloced_chunk(gm, mem, nb);
3266 goto postaction;
3267 }
3268
3269 else if (nb < gm->topsize) { /* Split top */
3270 size_t rsize = gm->topsize -= nb;
3271 mchunkptr p = gm->top;
3272 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3273 r->head = rsize | PINUSE_BIT;
3274 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3275 mem = chunk2mem(p);
3276 check_top_chunk(gm, gm->top);
3277 check_malloced_chunk(gm, mem, nb);
3278 goto postaction;
3279 }
3280
3281 mem = sys_alloc(gm, nb);
3282
3283 postaction:
3284 POSTACTION(gm);
3285 return mem;
3286 }
3287
3288 return 0;
3289}
3290
3291/* ---------------------------- free --------------------------- */
3292
3293void dlfree(void* mem) {
3294 /*
3295    Consolidate freed chunks with preceding or succeeding bordering
3296    free chunks, if they exist, and then place in a bin. Intermixed
3297 with special cases for top, dv, mmapped chunks, and usage errors.
3298 */
3299
3300 if (mem != 0) {
3301 mchunkptr p = mem2chunk(mem);
3302#if FOOTERS
3303 mstate fm = get_mstate_for(p);
3304 if (!ok_magic(fm)) {
3305 USAGE_ERROR_ACTION(fm, p);
3306 return;
3307 }
3308#else /* FOOTERS */
3309#define fm gm
3310#endif /* FOOTERS */
3311 if (!PREACTION(fm)) {
3312 check_inuse_chunk(fm, p);
3313 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3314 size_t psize = chunksize(p);
3315 mchunkptr next = chunk_plus_offset(p, psize);
3316 if (!pinuse(p)) {
3317 size_t prevsize = p->prev_foot;
3318 if (is_mmapped(p)) {
3319 psize += prevsize + MMAP_FOOT_PAD;
3320 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3321 fm->footprint -= psize;
3322 goto postaction;
3323 }
3324 else {
3325 mchunkptr prev = chunk_minus_offset(p, prevsize);
3326 psize += prevsize;
3327 p = prev;
3328 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3329 if (p != fm->dv) {
3330 unlink_chunk(fm, p, prevsize);
3331 }
3332 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3333 fm->dvsize = psize;
3334 set_free_with_pinuse(p, psize, next);
3335 goto postaction;
3336 }
3337 }
3338 else
3339 goto erroraction;
3340 }
3341 }
3342
3343 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3344 if (!cinuse(next)) { /* consolidate forward */
3345 if (next == fm->top) {
3346 size_t tsize = fm->topsize += psize;
3347 fm->top = p;
3348 p->head = tsize | PINUSE_BIT;
3349 if (p == fm->dv) {
3350 fm->dv = 0;
3351 fm->dvsize = 0;
3352 }
3353 if (should_trim(fm, tsize))
3354 sys_trim(fm, 0);
3355 goto postaction;
3356 }
3357 else if (next == fm->dv) {
3358 size_t dsize = fm->dvsize += psize;
3359 fm->dv = p;
3360 set_size_and_pinuse_of_free_chunk(p, dsize);
3361 goto postaction;
3362 }
3363 else {
3364 size_t nsize = chunksize(next);
3365 psize += nsize;
3366 unlink_chunk(fm, next, nsize);
3367 set_size_and_pinuse_of_free_chunk(p, psize);
3368 if (p == fm->dv) {
3369 fm->dvsize = psize;
3370 goto postaction;
3371 }
3372 }
3373 }
3374 else
3375 set_free_with_pinuse(p, psize, next);
3376
3377 if (is_small(psize)) {
3378 insert_small_chunk(fm, p, psize);
3379 check_free_chunk(fm, p);
3380 }
3381 else {
3382 tchunkptr tp = (tchunkptr)p;
3383 insert_large_chunk(fm, tp, psize);
3384 check_free_chunk(fm, p);
3385 if (--fm->release_checks == 0)
3386 release_unused_segments(fm);
3387 }
3388 goto postaction;
3389 }
3390 }
3391 erroraction:
3392 USAGE_ERROR_ACTION(fm, p);
3393 postaction:
3394 POSTACTION(fm);
3395 }
3396 }
3397#if !FOOTERS
3398#undef fm
3399#endif /* FOOTERS */
3400}
3401
3402void* dlcalloc(size_t n_elements, size_t elem_size) {
3403 void* mem;
3404 size_t req = 0;
3405 if (n_elements != 0) {
3406 req = n_elements * elem_size;
3407 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3408 (req / n_elements != elem_size))
3409 req = MAX_SIZE_T; /* force downstream failure on overflow */
3410 }
3411 mem = dlmalloc(req);
3412 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3413 memset(mem, 0, req);
3414 return mem;
3415}
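
/*
  Illustrative note (not part of dlmalloc proper): the overflow guard in
  dlcalloc above only pays for a division when at least one operand is
  >= 2^16, since two factors below 2^16 cannot overflow even a 32-bit
  size_t.  A standalone sketch of the same idea, using a hypothetical
  helper name:

    static int mul_would_overflow (size_t a, size_t b) {
      if ((a | b) & ~(size_t) 0xffff)        // some operand >= 2^16
        return a != 0 && (a * b) / a != b;   // divide to verify the product
      return 0;                              // both small: cannot overflow
    }
*/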
3416
3417#endif /* !ONLY_MSPACES */
3418
3419/* ------------ Internal support for realloc, memalign, etc -------------- */
3420
3421/* Try to realloc; only in-place unless can_move true */
Damjan Marion79934e82022-04-05 12:40:31 +02003422static __clib_nosanitize_addr mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
Dave Barach6a5adc32018-07-04 10:56:23 -04003423 int can_move) {
3424 mchunkptr newp = 0;
3425 size_t oldsize = chunksize(p);
3426 mchunkptr next = chunk_plus_offset(p, oldsize);
3427 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3428 ok_next(p, next) && ok_pinuse(next))) {
3429 if (is_mmapped(p)) {
3430 newp = mmap_resize(m, p, nb, can_move);
3431 }
3432 else if (oldsize >= nb) { /* already big enough */
3433 size_t rsize = oldsize - nb;
3434 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3435 mchunkptr r = chunk_plus_offset(p, nb);
3436 set_inuse(m, p, nb);
3437 set_inuse(m, r, rsize);
3438 dispose_chunk(m, r, rsize);
3439 }
3440 newp = p;
3441 }
3442 else if (next == m->top) { /* extend into top */
3443 if (oldsize + m->topsize > nb) {
3444 size_t newsize = oldsize + m->topsize;
3445 size_t newtopsize = newsize - nb;
3446 mchunkptr newtop = chunk_plus_offset(p, nb);
3447 set_inuse(m, p, nb);
3448 newtop->head = newtopsize |PINUSE_BIT;
3449 m->top = newtop;
3450 m->topsize = newtopsize;
3451 newp = p;
3452 }
3453 }
3454 else if (next == m->dv) { /* extend into dv */
3455 size_t dvs = m->dvsize;
3456 if (oldsize + dvs >= nb) {
3457 size_t dsize = oldsize + dvs - nb;
3458 if (dsize >= MIN_CHUNK_SIZE) {
3459 mchunkptr r = chunk_plus_offset(p, nb);
3460 mchunkptr n = chunk_plus_offset(r, dsize);
3461 set_inuse(m, p, nb);
3462 set_size_and_pinuse_of_free_chunk(r, dsize);
3463 clear_pinuse(n);
3464 m->dvsize = dsize;
3465 m->dv = r;
3466 }
3467 else { /* exhaust dv */
3468 size_t newsize = oldsize + dvs;
3469 set_inuse(m, p, newsize);
3470 m->dvsize = 0;
3471 m->dv = 0;
3472 }
3473 newp = p;
3474 }
3475 }
3476 else if (!cinuse(next)) { /* extend into next free chunk */
3477 size_t nextsize = chunksize(next);
3478 if (oldsize + nextsize >= nb) {
3479 size_t rsize = oldsize + nextsize - nb;
3480 unlink_chunk(m, next, nextsize);
3481 if (rsize < MIN_CHUNK_SIZE) {
3482 size_t newsize = oldsize + nextsize;
3483 set_inuse(m, p, newsize);
3484 }
3485 else {
3486 mchunkptr r = chunk_plus_offset(p, nb);
3487 set_inuse(m, p, nb);
3488 set_inuse(m, r, rsize);
3489 dispose_chunk(m, r, rsize);
3490 }
3491 newp = p;
3492 }
3493 }
3494 }
3495 else {
3496 USAGE_ERROR_ACTION(m, chunk2mem(p));
3497 }
3498 return newp;
3499}
3500
Damjan Marion79934e82022-04-05 12:40:31 +02003501__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04003502static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3503 void* mem = 0;
3504 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3505 alignment = MIN_CHUNK_SIZE;
3506 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3507 size_t a = MALLOC_ALIGNMENT << 1;
3508 while (a < alignment) a <<= 1;
3509 alignment = a;
3510 }
3511 if (bytes >= MAX_REQUEST - alignment) {
3512 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3513 MALLOC_FAILURE_ACTION;
3514 }
3515 }
3516 else {
3517 size_t nb = request2size(bytes);
3518 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3519 mem = internal_malloc(m, req);
3520 if (mem != 0) {
3521 mchunkptr p = mem2chunk(mem);
3522 if (PREACTION(m))
3523 return 0;
3524 if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
3525 /*
3526 Find an aligned spot inside chunk. Since we need to give
3527 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3528 the first calculation places us at a spot with less than
3529 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3530 We've allocated enough total room so that this is always
3531 possible.
3532 */
3533 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3534 SIZE_T_ONE)) &
3535 -alignment));
3536 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3537 br : br+alignment;
3538 mchunkptr newp = (mchunkptr)pos;
3539 size_t leadsize = pos - (char*)(p);
3540 size_t newsize = chunksize(p) - leadsize;
3541
3542 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3543 newp->prev_foot = p->prev_foot + leadsize;
3544 newp->head = newsize;
3545 }
3546 else { /* Otherwise, give back leader, use the rest */
3547 set_inuse(m, newp, newsize);
3548 set_inuse(m, p, leadsize);
3549 dispose_chunk(m, p, leadsize);
3550 }
3551 p = newp;
3552 }
3553
3554 /* Give back spare room at the end */
3555 if (!is_mmapped(p)) {
3556 size_t size = chunksize(p);
3557 if (size > nb + MIN_CHUNK_SIZE) {
3558 size_t remainder_size = size - nb;
3559 mchunkptr remainder = chunk_plus_offset(p, nb);
3560 set_inuse(m, p, nb);
3561 set_inuse(m, remainder, remainder_size);
3562 dispose_chunk(m, remainder, remainder_size);
3563 }
3564 }
3565
3566 mem = chunk2mem(p);
3567 assert (chunksize(p) >= nb);
3568 assert(((size_t)mem & (alignment - 1)) == 0);
3569 check_inuse_chunk(m, p);
3570 POSTACTION(m);
3571 }
3572 }
3573 return mem;
3574}
3575
3576/*
3577 Common support for independent_X routines, handling
3578 all of the combinations that can result.
3579 The opts arg has:
3580 bit 0 set if all elements are same size (using sizes[0])
3581 bit 1 set if elements should be zeroed
3582*/
3583static void** ialloc(mstate m,
3584 size_t n_elements,
3585 size_t* sizes,
3586 int opts,
3587 void* chunks[]) {
3588
3589 size_t element_size; /* chunksize of each element, if all same */
3590 size_t contents_size; /* total size of elements */
3591 size_t array_size; /* request size of pointer array */
3592 void* mem; /* malloced aggregate space */
3593 mchunkptr p; /* corresponding chunk */
3594 size_t remainder_size; /* remaining bytes while splitting */
3595 void** marray; /* either "chunks" or malloced ptr array */
3596 mchunkptr array_chunk; /* chunk for malloced ptr array */
3597 flag_t was_enabled; /* to disable mmap */
3598 size_t size;
3599 size_t i;
3600
3601 ensure_initialization();
3602 /* compute array length, if needed */
3603 if (chunks != 0) {
3604 if (n_elements == 0)
3605 return chunks; /* nothing to do */
3606 marray = chunks;
3607 array_size = 0;
3608 }
3609 else {
3610 /* if empty req, must still return chunk representing empty array */
3611 if (n_elements == 0)
3612 return (void**)internal_malloc(m, 0);
3613 marray = 0;
3614 array_size = request2size(n_elements * (sizeof(void*)));
3615 }
3616
3617 /* compute total element size */
3618 if (opts & 0x1) { /* all-same-size */
3619 element_size = request2size(*sizes);
3620 contents_size = n_elements * element_size;
3621 }
3622 else { /* add up all the sizes */
3623 element_size = 0;
3624 contents_size = 0;
3625 for (i = 0; i != n_elements; ++i)
3626 contents_size += request2size(sizes[i]);
3627 }
3628
3629 size = contents_size + array_size;
3630
3631 /*
3632 Allocate the aggregate chunk. First disable direct-mmapping so
3633 malloc won't use it, since we would not be able to later
3634 free/realloc space internal to a segregated mmap region.
3635 */
3636 was_enabled = use_mmap(m);
3637 disable_mmap(m);
3638 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3639 if (was_enabled)
3640 enable_mmap(m);
3641 if (mem == 0)
3642 return 0;
3643
3644 if (PREACTION(m)) return 0;
3645 p = mem2chunk(mem);
3646 remainder_size = chunksize(p);
3647
3648 assert(!is_mmapped(p));
3649
3650 if (opts & 0x2) { /* optionally clear the elements */
3651 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3652 }
3653
3654 /* If not provided, allocate the pointer array as final part of chunk */
3655 if (marray == 0) {
3656 size_t array_chunk_size;
3657 array_chunk = chunk_plus_offset(p, contents_size);
3658 array_chunk_size = remainder_size - contents_size;
3659 marray = (void**) (chunk2mem(array_chunk));
3660 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3661 remainder_size = contents_size;
3662 }
3663
3664 /* split out elements */
3665 for (i = 0; ; ++i) {
3666 marray[i] = chunk2mem(p);
3667 if (i != n_elements-1) {
3668 if (element_size != 0)
3669 size = element_size;
3670 else
3671 size = request2size(sizes[i]);
3672 remainder_size -= size;
3673 set_size_and_pinuse_of_inuse_chunk(m, p, size);
3674 p = chunk_plus_offset(p, size);
3675 }
3676 else { /* the final element absorbs any overallocation slop */
3677 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3678 break;
3679 }
3680 }
3681
3682#if DEBUG
3683 if (marray != chunks) {
3684 /* final element must have exactly exhausted chunk */
3685 if (element_size != 0) {
3686 assert(remainder_size == element_size);
3687 }
3688 else {
3689 assert(remainder_size == request2size(sizes[i]));
3690 }
3691 check_inuse_chunk(m, mem2chunk(marray));
3692 }
3693 for (i = 0; i != n_elements; ++i)
3694 check_inuse_chunk(m, mem2chunk(marray[i]));
3695
3696#endif /* DEBUG */
3697
3698 POSTACTION(m);
3699 return marray;
3700}
3701
3702/* Try to free all pointers in the given array.
3703 Note: this could be made faster by delaying consolidation,
3704 at the price of disabling some user integrity checks. We
3705 still optimize some consolidations by combining adjacent
3706 chunks before freeing, which will occur often if allocated
3707 with ialloc or when the array is sorted.
3708*/
3709static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3710 size_t unfreed = 0;
3711 if (!PREACTION(m)) {
3712 void** a;
3713 void** fence = &(array[nelem]);
3714 for (a = array; a != fence; ++a) {
3715 void* mem = *a;
3716 if (mem != 0) {
3717 mchunkptr p = mem2chunk(mem);
3718 size_t psize = chunksize(p);
3719#if FOOTERS
3720 if (get_mstate_for(p) != m) {
3721 ++unfreed;
3722 continue;
3723 }
3724#endif
3725 check_inuse_chunk(m, p);
3726 *a = 0;
3727 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3728 void ** b = a + 1; /* try to merge with next chunk */
3729 mchunkptr next = next_chunk(p);
3730 if (b != fence && *b == chunk2mem(next)) {
3731 size_t newsize = chunksize(next) + psize;
3732 set_inuse(m, p, newsize);
3733 *b = chunk2mem(p);
3734 }
3735 else
3736 dispose_chunk(m, p, psize);
3737 }
3738 else {
3739 CORRUPTION_ERROR_ACTION(m);
3740 break;
3741 }
3742 }
3743 }
3744 if (should_trim(m, m->topsize))
3745 sys_trim(m, 0);
3746 POSTACTION(m);
3747 }
3748 return unfreed;
3749}
3750
3751/* Traversal */
3752#if MALLOC_INSPECT_ALL
3753static void internal_inspect_all(mstate m,
3754 void(*handler)(void *start,
3755 void *end,
3756 size_t used_bytes,
3757 void* callback_arg),
3758 void* arg) {
3759 if (is_initialized(m)) {
3760 mchunkptr top = m->top;
3761 msegmentptr s;
3762 for (s = &m->seg; s != 0; s = s->next) {
3763 mchunkptr q = align_as_chunk(s->base);
3764 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3765 mchunkptr next = next_chunk(q);
3766 size_t sz = chunksize(q);
3767 size_t used;
3768 void* start;
3769 if (is_inuse(q)) {
3770 used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3771 start = chunk2mem(q);
3772 }
3773 else {
3774 used = 0;
3775 if (is_small(sz)) { /* offset by possible bookkeeping */
3776 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3777 }
3778 else {
3779 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3780 }
3781 }
3782 if (start < (void*)next) /* skip if all space is bookkeeping */
3783 handler(start, next, used, arg);
3784 if (q == top)
3785 break;
3786 q = next;
3787 }
3788 }
3789 }
3790}
3791#endif /* MALLOC_INSPECT_ALL */
3792
3793/* ------------------ Exported realloc, memalign, etc -------------------- */
3794
3795#if !ONLY_MSPACES
3796
3797void* dlrealloc(void* oldmem, size_t bytes) {
3798 void* mem = 0;
3799 if (oldmem == 0) {
3800 mem = dlmalloc(bytes);
3801 }
3802 else if (bytes >= MAX_REQUEST) {
3803 MALLOC_FAILURE_ACTION;
3804 }
3805#ifdef REALLOC_ZERO_BYTES_FREES
3806 else if (bytes == 0) {
3807 dlfree(oldmem);
3808 }
3809#endif /* REALLOC_ZERO_BYTES_FREES */
3810 else {
3811 size_t nb = request2size(bytes);
3812 mchunkptr oldp = mem2chunk(oldmem);
3813#if ! FOOTERS
3814 mstate m = gm;
3815#else /* FOOTERS */
3816 mstate m = get_mstate_for(oldp);
3817 if (!ok_magic(m)) {
3818 USAGE_ERROR_ACTION(m, oldmem);
3819 return 0;
3820 }
3821#endif /* FOOTERS */
3822 if (!PREACTION(m)) {
3823 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3824 POSTACTION(m);
3825 if (newp != 0) {
3826 check_inuse_chunk(m, newp);
3827 mem = chunk2mem(newp);
3828 }
3829 else {
3830 mem = internal_malloc(m, bytes);
3831 if (mem != 0) {
3832 size_t oc = chunksize(oldp) - overhead_for(oldp);
3833 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3834 internal_free(m, oldmem);
3835 }
3836 }
3837 }
3838 }
3839 return mem;
3840}
3841
3842void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3843 void* mem = 0;
3844 if (oldmem != 0) {
3845 if (bytes >= MAX_REQUEST) {
3846 MALLOC_FAILURE_ACTION;
3847 }
3848 else {
3849 size_t nb = request2size(bytes);
3850 mchunkptr oldp = mem2chunk(oldmem);
3851#if ! FOOTERS
3852 mstate m = gm;
3853#else /* FOOTERS */
3854 mstate m = get_mstate_for(oldp);
3855 if (!ok_magic(m)) {
3856 USAGE_ERROR_ACTION(m, oldmem);
3857 return 0;
3858 }
3859#endif /* FOOTERS */
3860 if (!PREACTION(m)) {
3861 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3862 POSTACTION(m);
3863 if (newp == oldp) {
3864 check_inuse_chunk(m, newp);
3865 mem = oldmem;
3866 }
3867 }
3868 }
3869 }
3870 return mem;
3871}
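
/*
  Illustrative sketch: dlrealloc_in_place returns the original pointer on
  success and 0 when the chunk could not be resized without moving, so a
  caller that prefers pointer stability can fall back to a moving dlrealloc
  only when needed.

    char *buf = dlmalloc (100);
    if (buf != 0 && dlrealloc_in_place (buf, 200) == 0) {
      char *bigger = dlrealloc (buf, 200);   // may move the data
      if (bigger != 0)
        buf = bigger;
    }
*/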
3872
3873void* dlmemalign(size_t alignment, size_t bytes) {
3874 if (alignment <= MALLOC_ALIGNMENT) {
3875 return dlmalloc(bytes);
3876 }
3877 return internal_memalign(gm, alignment, bytes);
3878}
3879
3880int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3881 void* mem = 0;
3882 if (alignment == MALLOC_ALIGNMENT)
3883 mem = dlmalloc(bytes);
3884 else {
3885 size_t d = alignment / sizeof(void*);
3886 size_t r = alignment % sizeof(void*);
3887 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3888 return EINVAL;
3889 else if (bytes <= MAX_REQUEST - alignment) {
3890 if (alignment < MIN_CHUNK_SIZE)
3891 alignment = MIN_CHUNK_SIZE;
3892 mem = internal_memalign(gm, alignment, bytes);
3893 }
3894 }
3895 if (mem == 0)
3896 return ENOMEM;
3897 else {
3898 *pp = mem;
3899 return 0;
3900 }
3901}
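
/*
  Illustrative sketch: dlposix_memalign follows the POSIX contract, so the
  alignment must be a power of two and a multiple of sizeof(void*); invalid
  alignments yield EINVAL and allocation failure yields ENOMEM.

    void *buf = 0;
    if (dlposix_memalign (&buf, 64, 1000) == 0) {   // 64 is a valid alignment
      // buf is 64-byte aligned here
      dlfree (buf);
    }
*/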
3902
3903void* dlvalloc(size_t bytes) {
3904 size_t pagesz;
3905 ensure_initialization();
3906 pagesz = mparams.page_size;
3907 return dlmemalign(pagesz, bytes);
3908}
3909
3910void* dlpvalloc(size_t bytes) {
3911 size_t pagesz;
3912 ensure_initialization();
3913 pagesz = mparams.page_size;
3914 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3915}
3916
3917void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3918 void* chunks[]) {
3919 size_t sz = elem_size; /* serves as 1-element array */
3920 return ialloc(gm, n_elements, &sz, 3, chunks);
3921}
3922
3923void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3924 void* chunks[]) {
3925 return ialloc(gm, n_elements, sizes, 0, chunks);
3926}
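
/*
  Illustrative sketch (modeled on the independent_comalloc usage described in
  Doug Lea's documentation): allocate a set of same-lifetime arrays in one
  pass; each element may still be freed individually.

    struct Node { int item; struct Node *next; };

    static void build_pools (size_t n) {
      void *pool[2];
      size_t sizes[2];
      sizes[0] = n * sizeof (int);
      sizes[1] = n * sizeof (struct Node);
      if (dlindependent_comalloc (2, sizes, pool) != 0) {
        int *counts = (int *) pool[0];
        struct Node *nodes = (struct Node *) pool[1];
        // ... use counts[] and nodes[] ...
        dlfree (counts);
        dlfree (nodes);
      }
    }
*/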
3927
3928size_t dlbulk_free(void* array[], size_t nelem) {
3929 return internal_bulk_free(gm, array, nelem);
3930}
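
/*
  Illustrative sketch: dlbulk_free releases an array of pointers in one
  locked pass and returns the number it could not free; it also clears each
  slot it frees, so the array can be reused.

    void *ptrs[8];
    size_t i, unfreed;
    for (i = 0; i < 8; ++i)
      ptrs[i] = dlmalloc (64);
    unfreed = dlbulk_free (ptrs, 8);   // expect 0 on success
    (void) unfreed;
*/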
3931
3932#if MALLOC_INSPECT_ALL
3933void dlmalloc_inspect_all(void(*handler)(void *start,
3934 void *end,
3935 size_t used_bytes,
3936 void* callback_arg),
3937 void* arg) {
3938 ensure_initialization();
3939 if (!PREACTION(gm)) {
3940 internal_inspect_all(gm, handler, arg);
3941 POSTACTION(gm);
3942 }
3943}
3944#endif /* MALLOC_INSPECT_ALL */
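
/*
  Illustrative sketch (assumes MALLOC_INSPECT_ALL is defined): a handler for
  dlmalloc_inspect_all that tallies chunks and in-use bytes while the
  traversal holds the malloc lock.

    struct heap_tally { size_t chunks; size_t used; };

    static void tally_handler (void *start, void *end,
                               size_t used_bytes, void *arg) {
      struct heap_tally *t = (struct heap_tally *) arg;
      (void) start; (void) end;
      t->chunks += 1;              // called once per traversed chunk
      t->used += used_bytes;       // 0 for free space
    }

    struct heap_tally t = { 0, 0 };
    dlmalloc_inspect_all (tally_handler, &t);
*/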
3945
3946int dlmalloc_trim(size_t pad) {
3947 int result = 0;
3948 ensure_initialization();
3949 if (!PREACTION(gm)) {
3950 result = sys_trim(gm, pad);
3951 POSTACTION(gm);
3952 }
3953 return result;
3954}
3955
3956size_t dlmalloc_footprint(void) {
3957 return gm->footprint;
3958}
3959
3960size_t dlmalloc_max_footprint(void) {
3961 return gm->max_footprint;
3962}
3963
3964size_t dlmalloc_footprint_limit(void) {
3965 size_t maf = gm->footprint_limit;
3966 return maf == 0 ? MAX_SIZE_T : maf;
3967}
3968
3969size_t dlmalloc_set_footprint_limit(size_t bytes) {
3970 size_t result; /* invert sense of 0 */
3971 if (bytes == 0)
3972 result = granularity_align(1); /* Use minimal size */
3973 else if (bytes == MAX_SIZE_T)
3974 result = 0; /* disable */
3975 else
3976 result = granularity_align(bytes);
3977 return gm->footprint_limit = result;
3978}
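
/*
  Illustrative sketch: cap the total footprint at roughly 64 MB.  The value
  actually installed is granularity-aligned, and passing MAX_SIZE_T removes
  the cap again (footprint_limit == 0 internally means "no limit").

    size_t installed = dlmalloc_set_footprint_limit ((size_t) 64 << 20);
    (void) installed;   // subsequent requests for system memory respect the cap
*/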
3979
3980#if !NO_MALLINFO
Dave Barachaf7dd5b2018-08-23 17:08:44 -04003981struct dlmallinfo dlmallinfo(void) {
Dave Barach6a5adc32018-07-04 10:56:23 -04003982 return internal_mallinfo(gm);
3983}
3984#endif /* NO_MALLINFO */
3985
3986#if !NO_MALLOC_STATS
3987void dlmalloc_stats() {
3988 internal_malloc_stats(gm);
3989}
3990#endif /* NO_MALLOC_STATS */
3991
3992int dlmallopt(int param_number, int value) {
3993 return change_mparam(param_number, value);
3994}
3995
3996size_t dlmalloc_usable_size(void* mem) {
3997 if (mem != 0) {
3998 mchunkptr p = mem2chunk(mem);
3999 if (is_inuse(p))
4000 return chunksize(p) - overhead_for(p);
4001 }
4002 return 0;
4003}
4004
4005#endif /* !ONLY_MSPACES */
4006
4007/* ----------------------------- user mspaces ---------------------------- */
4008
4009#if MSPACES
4010
4011static mstate init_user_mstate(char* tbase, size_t tsize) {
4012 size_t msize = pad_request(sizeof(struct malloc_state));
4013 mchunkptr mn;
4014 mchunkptr msp = align_as_chunk(tbase);
4015 mstate m = (mstate)(chunk2mem(msp));
4016 memset(m, 0, msize);
4017 (void)INITIAL_LOCK(&m->mutex);
4018 msp->head = (msize|INUSE_BITS);
4019 m->seg.base = m->least_addr = tbase;
4020 m->seg.size = m->footprint = m->max_footprint = tsize;
4021 m->magic = mparams.magic;
4022 m->release_checks = MAX_RELEASE_CHECK_RATE;
4023 m->mflags = mparams.default_mflags;
4024 m->extp = 0;
4025 m->exts = 0;
4026 disable_contiguous(m);
4027 init_bins(m);
4028 mn = next_chunk(mem2chunk(m));
4029 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4030 check_top_chunk(m, m->top);
4031 return m;
4032}
4033
4034mspace create_mspace(size_t capacity, int locked) {
4035 mstate m = 0;
4036 size_t msize;
4037 ensure_initialization();
4038 msize = pad_request(sizeof(struct malloc_state));
4039 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4040 size_t rs = ((capacity == 0)? mparams.granularity :
4041 (capacity + TOP_FOOT_SIZE + msize));
4042 size_t tsize = granularity_align(rs);
4043 char* tbase = (char*)(CALL_MMAP(tsize));
4044 if (tbase != CMFAIL) {
4045 m = init_user_mstate(tbase, tsize);
4046 m->seg.sflags = USE_MMAP_BIT;
4047 set_lock(m, locked);
4048 }
4049 }
4050 return (mspace)m;
4051}
4052
4053mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4054 mstate m = 0;
4055 size_t msize;
4056 ensure_initialization();
4057 msize = pad_request(sizeof(struct malloc_state));
4058 if (capacity > msize + TOP_FOOT_SIZE &&
4059 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4060 m = init_user_mstate((char*)base, capacity);
4061 m->seg.sflags = EXTERN_BIT;
4062 set_lock(m, locked);
4063 }
4064 return (mspace)m;
4065}
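
/*
  Illustrative sketch (buffer size is an arbitrary assumption): carve a
  private heap out of a caller-owned region, allocate from it, then discard
  the whole heap at once.  destroy_mspace only munmaps segments it mapped
  itself, so an EXTERN_BIT base region remains the caller's to reuse.

    static char arena[1 << 20];

    static void demo (void) {
      mspace h = create_mspace_with_base (arena, sizeof (arena), 0);  // locked = 0
      if (h != 0) {
        void *obj = mspace_malloc (h, 128);
        (void) obj;
        destroy_mspace (h);
      }
    }
*/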
4066
4067int mspace_track_large_chunks(mspace msp, int enable) {
4068 int ret = 0;
4069 mstate ms = (mstate)msp;
4070 if (!PREACTION(ms)) {
4071 if (!use_mmap(ms)) {
4072 ret = 1;
4073 }
4074 if (!enable) {
4075 enable_mmap(ms);
4076 } else {
4077 disable_mmap(ms);
4078 }
4079 POSTACTION(ms);
4080 }
4081 return ret;
4082}
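
/*
  Illustrative sketch: enabling large-chunk tracking simply disables direct
  mmap for the mspace, so big allocations stay inside the mspace's own
  segments (useful when mspace_is_heap_object() must see them).  Assumes the
  mspace is still allowed to expand.

    mspace h = create_mspace (0, 1);
    mspace_track_large_chunks (h, 1);
    void *big = mspace_malloc (h, 4 << 20);
    // big now lives in a segment owned by h, not in a standalone mmap
*/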
4083
Damjan Marion79934e82022-04-05 12:40:31 +02004084__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004085size_t destroy_mspace(mspace msp) {
4086 size_t freed = 0;
4087 mstate ms = (mstate)msp;
4088 if (ok_magic(ms)) {
4089 msegmentptr sp = &ms->seg;
4090 (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4091 while (sp != 0) {
4092 char* base = sp->base;
4093 size_t size = sp->size;
4094 flag_t flag = sp->sflags;
4095 (void)base; /* placate people compiling -Wunused-variable */
4096 sp = sp->next;
4097 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4098 CALL_MUNMAP(base, size) == 0)
4099 freed += size;
4100 }
4101 }
4102 else {
4103 USAGE_ERROR_ACTION(ms,ms);
4104 }
4105 return freed;
4106}
4107
David Johnsond9818dd2018-12-14 14:53:41 -05004108void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
Dave Barach6a5adc32018-07-04 10:56:23 -04004109{
4110 mstate ms;
4111 msegment *this_seg;
Dave Barachd67a4282019-06-15 12:46:13 -04004112
Dave Barach6a5adc32018-07-04 10:56:23 -04004113 ms = (mstate)msp;
4114 this_seg = &ms->seg;
4115
David Johnsond9818dd2018-12-14 14:53:41 -05004116 *addrp = this_seg->base;
Dave Barach6a5adc32018-07-04 10:56:23 -04004117 *sizep = this_seg->size;
4118}
4119
Damjan Marion79934e82022-04-05 12:40:31 +02004120__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004121int mspace_is_heap_object (mspace msp, void *p)
4122{
4123 msegment *this_seg;
4124 char *pp, *base;
4125 mstate ms;
4126
4127 ms = (mstate)msp;
4128
4129 this_seg = &ms->seg;
4130 pp = (char *) p;
4131
4132 while (this_seg)
4133 {
4134 base = this_seg->base;
4135 if (pp >= base && pp < (base + this_seg->size))
4136 return 1;
4137 this_seg = this_seg->next;
4138 }
Dave Barachb76590a2018-08-23 11:23:00 -04004139
4140 if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
4141 return 1;
4142
Dave Barach6a5adc32018-07-04 10:56:23 -04004143 return 0;
4144}
4145
Damjan Marion79934e82022-04-05 12:40:31 +02004146__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004147void *mspace_least_addr (mspace msp)
4148{
4149 mstate ms = (mstate) msp;
4150 return (void *) ms->least_addr;
4151}
4152
4153void mspace_disable_expand (mspace msp)
4154{
4155 mstate ms = (mstate)msp;
4156
4157 disable_expand (ms);
4158}
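
/*
  Illustrative sketch: pin a heap to a caller-supplied region so that, once
  the region is exhausted, allocations fail (return 0) instead of the mspace
  mapping in more memory.

    static mspace make_fixed_heap (void *base, size_t size) {
      mspace h = create_mspace_with_base (base, size, 1);
      if (h != 0)
        mspace_disable_expand (h);
      return h;
    }
*/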
4159
Damjan Marion79934e82022-04-05 12:40:31 +02004160__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004161int mspace_enable_disable_trace (mspace msp, int enable)
4162{
4163 mstate ms = (mstate)msp;
4164 int was_enabled = 0;
4165
Dave Barached2fe932018-07-19 12:29:45 -04004166 if (use_trace(ms))
Dave Barach6a5adc32018-07-04 10:56:23 -04004167 was_enabled = 1;
4168
4169 if (enable)
4170 enable_trace (ms);
4171 else
4172 disable_trace (ms);
4173
4174 return (was_enabled);
4175}
4176
Damjan Marion79934e82022-04-05 12:40:31 +02004177__clib_nosanitize_addr
Dave Barachd67a4282019-06-15 12:46:13 -04004178int mspace_is_traced (mspace msp)
4179{
4180 mstate ms = (mstate)msp;
4181
4182 if (use_trace(ms))
4183 return 1;
4184 return 0;
4185}
4186
Damjan Marion79934e82022-04-05 12:40:31 +02004187__clib_nosanitize_addr
Dave Barachd67a4282019-06-15 12:46:13 -04004188void* mspace_get_aligned (mspace msp,
David Johnsond9818dd2018-12-14 14:53:41 -05004189 unsigned long n_user_data_bytes,
Dave Barachd67a4282019-06-15 12:46:13 -04004190 unsigned long align,
David Johnsond9818dd2018-12-14 14:53:41 -05004191 unsigned long align_offset) {
Dave Barach6a5adc32018-07-04 10:56:23 -04004192 char *rv;
David Johnsond9818dd2018-12-14 14:53:41 -05004193 unsigned long searchp;
Dave Barach6a5adc32018-07-04 10:56:23 -04004194 unsigned *wwp; /* "where's Waldo" pointer */
4195 mstate ms = (mstate)msp;
4196
4197 /*
Dave Barachd67a4282019-06-15 12:46:13 -04004198 * Allocate space for the "Where's Waldo?" pointer, which records the
Dave Barach6a5adc32018-07-04 10:56:23 -04004199 * offset back to the base of the dlmalloc object
4200 */
4201 n_user_data_bytes += sizeof(unsigned);
Damjan Mariondcb1db92020-11-05 14:54:08 +01004202 align = align < MALLOC_ALIGNMENT ? MALLOC_ALIGNMENT : align;
Dave Barach6a5adc32018-07-04 10:56:23 -04004203
4204 /*
4205 * Alignment requests greater than 4K must be at offset zero,
4206 * and must be freed using mspace_put_no_offset - or never freed -
4207 * since the "Where's Waldo?" pointer would waste too much space.
Dave Barachd67a4282019-06-15 12:46:13 -04004208 *
4209 * Waldo is the address of the chunk of memory returned by mspace_malloc,
Dave Barach6a5adc32018-07-04 10:56:23 -04004210 * which we need later to call mspace_free...
4211 */
David Johnsond9818dd2018-12-14 14:53:41 -05004212 if (align > 4<<10 || align_offset == ~0UL) {
Dave Barach6a5adc32018-07-04 10:56:23 -04004213 n_user_data_bytes -= sizeof(unsigned);
4214 assert(align_offset == 0);
4215 rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
Dave Barachd67a4282019-06-15 12:46:13 -04004216
Dave Barach6a5adc32018-07-04 10:56:23 -04004217 /* Trace the allocation */
4218 if (rv && use_trace(ms)) {
4219 mchunkptr p = mem2chunk(rv);
4220 size_t psize = chunksize(p);
David Johnsond9818dd2018-12-14 14:53:41 -05004221 mheap_get_trace ((unsigned long)rv, psize);
Dave Barach6a5adc32018-07-04 10:56:23 -04004222 }
4223 return rv;
4224 }
4225
4226 align = clib_max (align, MALLOC_ALIGNMENT);
4227 align = max_pow2 (align);
4228
4229 /* Correct align offset to be smaller than alignment. */
4230 align_offset &= (align - 1);
4231
4232 n_user_data_bytes += align;
4233 rv = mspace_malloc (msp, n_user_data_bytes);
4234
4235 if (rv == 0)
4236 return rv;
4237
4238 /* Honor the alignment request */
David Johnsond9818dd2018-12-14 14:53:41 -05004239 searchp = (unsigned long)(rv + sizeof (unsigned));
Dave Barach6a5adc32018-07-04 10:56:23 -04004240
4241#if 0 /* this is the idea... */
4242 while ((searchp + align_offset) % align)
4243 searchp++;
4244#endif
4245
4246 {
David Johnsond9818dd2018-12-14 14:53:41 -05004247 unsigned long where_now, delta;
Dave Barach6a5adc32018-07-04 10:56:23 -04004248
4249 where_now = (searchp + align_offset) % align;
4250 delta = align - where_now;
4251
4252 searchp += delta;
4253 }
4254
4255 wwp = (unsigned *)(searchp - sizeof(unsigned));
David Johnsond9818dd2018-12-14 14:53:41 -05004256 *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
Dave Barach6a5adc32018-07-04 10:56:23 -04004257 assert (*wwp < align);
4258
4259 if (use_trace(ms)) {
4260 mchunkptr p = mem2chunk(rv);
4261 size_t psize = chunksize(p);
Wei CHEN5e282e92019-04-09 12:38:40 +08004262 mheap_get_trace (searchp, psize);
Dave Barach6a5adc32018-07-04 10:56:23 -04004263 }
4264 return (void *) searchp;
4265}
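
/*
  Layout produced by mspace_get_aligned on the common (offset-capable) path,
  reconstructed from the code above.  searchp is chosen so that
  (searchp + align_offset) is a multiple of `align':

      rv (from mspace_malloc)            searchp (returned to caller)
      |                                  |
      v                                  v
      +--------------------------+------+---------------------------+
      | padding (< align bytes)  | wwp  | user data ...             |
      +--------------------------+------+---------------------------+
                                   ^
                                   *wwp == searchp - (rv + sizeof (*wwp))

  mspace_put() reads the word just below the pointer it is handed and walks
  back *wwp bytes from there to recover the original dlmalloc object before
  calling mspace_free().
*/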
4266
Damjan Marion79934e82022-04-05 12:40:31 +02004267__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004268void mspace_put (mspace msp, void *p_arg)
4269{
4270 char *object_header;
4271 unsigned *wwp;
4272 mstate ms = (mstate)msp;
4273
4274 /* Find the object header delta */
4275 wwp = (unsigned *)p_arg;
4276 wwp --;
4277
4278 /* Recover the dlmalloc object pointer */
4279 object_header = (char *)wwp;
4280 object_header -= *wwp;
4281
4282 /* Tracing (if enabled) */
4283 if (use_trace(ms))
4284 {
4285 mchunkptr p = mem2chunk(object_header);
4286 size_t psize = chunksize(p);
4287
David Johnsond9818dd2018-12-14 14:53:41 -05004288 mheap_put_trace ((unsigned long)p_arg, psize);
Dave Barach6a5adc32018-07-04 10:56:23 -04004289 }
4290
Benoît Ganne9fb6d402019-04-15 15:28:21 +02004291#if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
Dave Barach1c7bf5d2018-07-31 10:39:30 -04004292 /* Poison the object */
4293 {
4294 size_t psize = mspace_usable_size (object_header);
4295 memset (object_header, 0x13, psize);
4296 }
4297#endif
4298
Dave Barach6a5adc32018-07-04 10:56:23 -04004299 /* And free it... */
4300 mspace_free (msp, object_header);
4301}
4302
4303void mspace_put_no_offset (mspace msp, void *p_arg)
4304{
4305 mstate ms = (mstate)msp;
4306
4307 if (use_trace(ms))
4308 {
4309 mchunkptr p = mem2chunk(p_arg);
4310 size_t psize = chunksize(p);
4311
David Johnsond9818dd2018-12-14 14:53:41 -05004312 mheap_put_trace ((unsigned long)p_arg, psize);
Dave Barach6a5adc32018-07-04 10:56:23 -04004313 }
4314 mspace_free (msp, p_arg);
4315}
4316
Damjan Marion79934e82022-04-05 12:40:31 +02004317__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004318size_t mspace_usable_size_with_delta (const void *p)
4319{
4320 size_t usable_size;
4321 char *object_header;
4322 unsigned *wwp;
4323
4324 /* Find the object header delta */
4325 wwp = (unsigned *)p;
4326 wwp --;
4327
4328 /* Recover the dlmalloc object pointer */
4329 object_header = (char *)wwp;
4330 object_header -= *wwp;
4331
4332 usable_size = mspace_usable_size (object_header);
4333 /* account for the offset and the size of the offset... */
4334 usable_size -= (*wwp + sizeof (*wwp));
4335 return usable_size;
4336}
4337
4338/*
4339 mspace versions of routines are near-clones of the global
4340 versions. This is not so nice but better than the alternatives.
4341*/
4342
Damjan Marion79934e82022-04-05 12:40:31 +02004343__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004344void* mspace_malloc(mspace msp, size_t bytes) {
4345 mstate ms = (mstate)msp;
4346 if (!ok_magic(ms)) {
4347 USAGE_ERROR_ACTION(ms,ms);
4348 return 0;
4349 }
4350 if (!PREACTION(ms)) {
4351 void* mem;
4352 size_t nb;
4353 if (bytes <= MAX_SMALL_REQUEST) {
4354 bindex_t idx;
4355 binmap_t smallbits;
4356 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4357 idx = small_index(nb);
4358 smallbits = ms->smallmap >> idx;
4359
4360 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4361 mchunkptr b, p;
4362 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4363 b = smallbin_at(ms, idx);
4364 p = b->fd;
4365 assert(chunksize(p) == small_index2size(idx));
4366 unlink_first_small_chunk(ms, b, p, idx);
4367 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4368 mem = chunk2mem(p);
4369 check_malloced_chunk(ms, mem, nb);
4370 goto postaction;
4371 }
4372
4373 else if (nb > ms->dvsize) {
4374 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4375 mchunkptr b, p, r;
4376 size_t rsize;
4377 bindex_t i;
4378 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4379 binmap_t leastbit = least_bit(leftbits);
4380 compute_bit2idx(leastbit, i);
4381 b = smallbin_at(ms, i);
4382 p = b->fd;
4383 assert(chunksize(p) == small_index2size(i));
4384 unlink_first_small_chunk(ms, b, p, i);
4385 rsize = small_index2size(i) - nb;
4386 /* Fit here cannot be remainderless if 4-byte sizes */
4387 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4388 set_inuse_and_pinuse(ms, p, small_index2size(i));
4389 else {
4390 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4391 r = chunk_plus_offset(p, nb);
4392 set_size_and_pinuse_of_free_chunk(r, rsize);
4393 replace_dv(ms, r, rsize);
4394 }
4395 mem = chunk2mem(p);
4396 check_malloced_chunk(ms, mem, nb);
4397 goto postaction;
4398 }
4399
4400 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4401 check_malloced_chunk(ms, mem, nb);
4402 goto postaction;
4403 }
4404 }
4405 }
4406 else if (bytes >= MAX_REQUEST)
4407 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4408 else {
4409 nb = pad_request(bytes);
4410 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4411 check_malloced_chunk(ms, mem, nb);
4412 goto postaction;
4413 }
4414 }
4415
4416 if (nb <= ms->dvsize) {
4417 size_t rsize = ms->dvsize - nb;
4418 mchunkptr p = ms->dv;
4419 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4420 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4421 ms->dvsize = rsize;
4422 set_size_and_pinuse_of_free_chunk(r, rsize);
4423 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4424 }
4425 else { /* exhaust dv */
4426 size_t dvs = ms->dvsize;
4427 ms->dvsize = 0;
4428 ms->dv = 0;
4429 set_inuse_and_pinuse(ms, p, dvs);
4430 }
4431 mem = chunk2mem(p);
4432 check_malloced_chunk(ms, mem, nb);
4433 goto postaction;
4434 }
4435
4436 else if (nb < ms->topsize) { /* Split top */
4437 size_t rsize = ms->topsize -= nb;
4438 mchunkptr p = ms->top;
4439 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4440 r->head = rsize | PINUSE_BIT;
4441 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4442 mem = chunk2mem(p);
4443 check_top_chunk(ms, ms->top);
4444 check_malloced_chunk(ms, mem, nb);
4445 goto postaction;
4446 }
4447
4448 mem = sys_alloc(ms, nb);
4449
4450 postaction:
4451 POSTACTION(ms);
4452 return mem;
4453 }
4454
4455 return 0;
4456}
4457
Damjan Marion79934e82022-04-05 12:40:31 +02004458__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004459void mspace_free(mspace msp, void* mem) {
4460 if (mem != 0) {
4461 mchunkptr p = mem2chunk(mem);
4462#if FOOTERS
4463 mstate fm = get_mstate_for(p);
4464 (void)msp; /* placate people compiling -Wunused */
4465#else /* FOOTERS */
4466 mstate fm = (mstate)msp;
4467#endif /* FOOTERS */
4468 if (!ok_magic(fm)) {
4469 USAGE_ERROR_ACTION(fm, p);
4470 return;
4471 }
4472 if (!PREACTION(fm)) {
4473 check_inuse_chunk(fm, p);
4474 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4475 size_t psize = chunksize(p);
4476 mchunkptr next = chunk_plus_offset(p, psize);
4477 if (!pinuse(p)) {
4478 size_t prevsize = p->prev_foot;
4479 if (is_mmapped(p)) {
4480 psize += prevsize + MMAP_FOOT_PAD;
4481 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4482 fm->footprint -= psize;
4483 goto postaction;
4484 }
4485 else {
4486 mchunkptr prev = chunk_minus_offset(p, prevsize);
4487 psize += prevsize;
4488 p = prev;
4489 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4490 if (p != fm->dv) {
4491 unlink_chunk(fm, p, prevsize);
4492 }
4493 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4494 fm->dvsize = psize;
4495 set_free_with_pinuse(p, psize, next);
4496 goto postaction;
4497 }
4498 }
4499 else
4500 goto erroraction;
4501 }
4502 }
4503
4504 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4505 if (!cinuse(next)) { /* consolidate forward */
4506 if (next == fm->top) {
4507 size_t tsize = fm->topsize += psize;
4508 fm->top = p;
4509 p->head = tsize | PINUSE_BIT;
4510 if (p == fm->dv) {
4511 fm->dv = 0;
4512 fm->dvsize = 0;
4513 }
4514 if (should_trim(fm, tsize))
4515 sys_trim(fm, 0);
4516 goto postaction;
4517 }
4518 else if (next == fm->dv) {
4519 size_t dsize = fm->dvsize += psize;
4520 fm->dv = p;
4521 set_size_and_pinuse_of_free_chunk(p, dsize);
4522 goto postaction;
4523 }
4524 else {
4525 size_t nsize = chunksize(next);
4526 psize += nsize;
4527 unlink_chunk(fm, next, nsize);
4528 set_size_and_pinuse_of_free_chunk(p, psize);
4529 if (p == fm->dv) {
4530 fm->dvsize = psize;
4531 goto postaction;
4532 }
4533 }
4534 }
4535 else
4536 set_free_with_pinuse(p, psize, next);
4537
4538 if (is_small(psize)) {
4539 insert_small_chunk(fm, p, psize);
4540 check_free_chunk(fm, p);
4541 }
4542 else {
4543 tchunkptr tp = (tchunkptr)p;
4544 insert_large_chunk(fm, tp, psize);
4545 check_free_chunk(fm, p);
4546 if (--fm->release_checks == 0)
4547 release_unused_segments(fm);
4548 }
4549 goto postaction;
4550 }
4551 }
4552 erroraction:
4553 USAGE_ERROR_ACTION(fm, p);
4554 postaction:
4555 POSTACTION(fm);
4556 }
4557 }
4558}
4559
4560void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4561 void* mem;
4562 size_t req = 0;
4563 mstate ms = (mstate)msp;
4564 if (!ok_magic(ms)) {
4565 USAGE_ERROR_ACTION(ms,ms);
4566 return 0;
4567 }
4568 if (n_elements != 0) {
4569 req = n_elements * elem_size;
4570 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4571 (req / n_elements != elem_size))
4572 req = MAX_SIZE_T; /* force downstream failure on overflow */
4573 }
4574 mem = internal_malloc(ms, req);
4575 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4576 memset(mem, 0, req);
4577 return mem;
4578}
4579
4580void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4581 void* mem = 0;
4582 if (oldmem == 0) {
4583 mem = mspace_malloc(msp, bytes);
4584 }
4585 else if (bytes >= MAX_REQUEST) {
4586 MALLOC_FAILURE_ACTION;
4587 }
4588#ifdef REALLOC_ZERO_BYTES_FREES
4589 else if (bytes == 0) {
4590 mspace_free(msp, oldmem);
4591 }
4592#endif /* REALLOC_ZERO_BYTES_FREES */
4593 else {
4594 size_t nb = request2size(bytes);
4595 mchunkptr oldp = mem2chunk(oldmem);
4596#if ! FOOTERS
4597 mstate m = (mstate)msp;
4598#else /* FOOTERS */
4599 mstate m = get_mstate_for(oldp);
4600 if (!ok_magic(m)) {
4601 USAGE_ERROR_ACTION(m, oldmem);
4602 return 0;
4603 }
4604#endif /* FOOTERS */
4605 if (!PREACTION(m)) {
4606 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4607 POSTACTION(m);
4608 if (newp != 0) {
4609 check_inuse_chunk(m, newp);
4610 mem = chunk2mem(newp);
4611 }
4612 else {
4613 mem = mspace_malloc(m, bytes);
4614 if (mem != 0) {
4615 size_t oc = chunksize(oldp) - overhead_for(oldp);
4616 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4617 mspace_free(m, oldmem);
4618 }
4619 }
4620 }
4621 }
4622 return mem;
4623}
4624
Damjan Marion79934e82022-04-05 12:40:31 +02004625__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004626void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4627 void* mem = 0;
4628 if (oldmem != 0) {
4629 if (bytes >= MAX_REQUEST) {
4630 MALLOC_FAILURE_ACTION;
4631 }
4632 else {
4633 size_t nb = request2size(bytes);
4634 mchunkptr oldp = mem2chunk(oldmem);
4635#if ! FOOTERS
4636 mstate m = (mstate)msp;
4637#else /* FOOTERS */
4638 mstate m = get_mstate_for(oldp);
4639 (void)msp; /* placate people compiling -Wunused */
4640 if (!ok_magic(m)) {
4641 USAGE_ERROR_ACTION(m, oldmem);
4642 return 0;
4643 }
4644#endif /* FOOTERS */
4645 if (!PREACTION(m)) {
4646 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4647 POSTACTION(m);
4648 if (newp == oldp) {
4649 check_inuse_chunk(m, newp);
4650 mem = oldmem;
4651 }
4652 }
4653 }
4654 }
4655 return mem;
4656}
4657
Damjan Marion79934e82022-04-05 12:40:31 +02004658__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004659void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4660 mstate ms = (mstate)msp;
4661 if (!ok_magic(ms)) {
4662 USAGE_ERROR_ACTION(ms,ms);
4663 return 0;
4664 }
4665 if (alignment <= MALLOC_ALIGNMENT)
4666 return mspace_malloc(msp, bytes);
4667 return internal_memalign(ms, alignment, bytes);
4668}
4669
4670void** mspace_independent_calloc(mspace msp, size_t n_elements,
4671 size_t elem_size, void* chunks[]) {
4672 size_t sz = elem_size; /* serves as 1-element array */
4673 mstate ms = (mstate)msp;
4674 if (!ok_magic(ms)) {
4675 USAGE_ERROR_ACTION(ms,ms);
4676 return 0;
4677 }
4678 return ialloc(ms, n_elements, &sz, 3, chunks);
4679}
4680
4681void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4682 size_t sizes[], void* chunks[]) {
4683 mstate ms = (mstate)msp;
4684 if (!ok_magic(ms)) {
4685 USAGE_ERROR_ACTION(ms,ms);
4686 return 0;
4687 }
4688 return ialloc(ms, n_elements, sizes, 0, chunks);
4689}
4690
4691size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4692 return internal_bulk_free((mstate)msp, array, nelem);
4693}
4694
4695#if MALLOC_INSPECT_ALL
4696void mspace_inspect_all(mspace msp,
4697 void(*handler)(void *start,
4698 void *end,
4699 size_t used_bytes,
4700 void* callback_arg),
4701 void* arg) {
4702 mstate ms = (mstate)msp;
4703 if (ok_magic(ms)) {
4704 if (!PREACTION(ms)) {
4705 internal_inspect_all(ms, handler, arg);
4706 POSTACTION(ms);
4707 }
4708 }
4709 else {
4710 USAGE_ERROR_ACTION(ms,ms);
4711 }
4712}
4713#endif /* MALLOC_INSPECT_ALL */
4714
4715int mspace_trim(mspace msp, size_t pad) {
4716 int result = 0;
4717 mstate ms = (mstate)msp;
4718 if (ok_magic(ms)) {
4719 if (!PREACTION(ms)) {
4720 result = sys_trim(ms, pad);
4721 POSTACTION(ms);
4722 }
4723 }
4724 else {
4725 USAGE_ERROR_ACTION(ms,ms);
4726 }
4727 return result;
4728}
4729
4730#if !NO_MALLOC_STATS
4731void mspace_malloc_stats(mspace msp) {
4732 mstate ms = (mstate)msp;
4733 if (ok_magic(ms)) {
4734 internal_malloc_stats(ms);
4735 }
4736 else {
4737 USAGE_ERROR_ACTION(ms,ms);
4738 }
4739}
4740#endif /* NO_MALLOC_STATS */
4741
4742size_t mspace_footprint(mspace msp) {
4743 size_t result = 0;
4744 mstate ms = (mstate)msp;
4745 if (ok_magic(ms)) {
4746 result = ms->footprint;
4747 }
4748 else {
4749 USAGE_ERROR_ACTION(ms,ms);
4750 }
4751 return result;
4752}
4753
4754size_t mspace_max_footprint(mspace msp) {
4755 size_t result = 0;
4756 mstate ms = (mstate)msp;
4757 if (ok_magic(ms)) {
4758 result = ms->max_footprint;
4759 }
4760 else {
4761 USAGE_ERROR_ACTION(ms,ms);
4762 }
4763 return result;
4764}
4765
4766size_t mspace_footprint_limit(mspace msp) {
4767 size_t result = 0;
4768 mstate ms = (mstate)msp;
4769 if (ok_magic(ms)) {
4770 size_t maf = ms->footprint_limit;
4771 result = (maf == 0) ? MAX_SIZE_T : maf;
4772 }
4773 else {
4774 USAGE_ERROR_ACTION(ms,ms);
4775 }
4776 return result;
4777}
4778
4779size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4780 size_t result = 0;
4781 mstate ms = (mstate)msp;
4782 if (ok_magic(ms)) {
4783 if (bytes == 0)
4784 result = granularity_align(1); /* Use minimal size */
4785 else if (bytes == MAX_SIZE_T)
4786 result = 0; /* disable */
4787 else
4788 result = granularity_align(bytes);
4789 ms->footprint_limit = result;
4790 }
4791 else {
4792 USAGE_ERROR_ACTION(ms,ms);
4793 }
4794 return result;
4795}
4796
4797#if !NO_MALLINFO
Damjan Marion79934e82022-04-05 12:40:31 +02004798__clib_nosanitize_addr
Dave Barachaf7dd5b2018-08-23 17:08:44 -04004799struct dlmallinfo mspace_mallinfo(mspace msp) {
Dave Barach6a5adc32018-07-04 10:56:23 -04004800 mstate ms = (mstate)msp;
4801 if (!ok_magic(ms)) {
4802 USAGE_ERROR_ACTION(ms,ms);
4803 }
4804 return internal_mallinfo(ms);
4805}
4806#endif /* NO_MALLINFO */
4807
Damjan Marion79934e82022-04-05 12:40:31 +02004808__clib_nosanitize_addr
Dave Barach6a5adc32018-07-04 10:56:23 -04004809size_t mspace_usable_size(const void* mem) {
4810 if (mem != 0) {
4811 mchunkptr p = mem2chunk(mem);
4812 if (is_inuse(p))
4813 return chunksize(p) - overhead_for(p);
4814 }
4815 return 0;
4816}
4817
4818int mspace_mallopt(int param_number, int value) {
4819 return change_mparam(param_number, value);
4820}
4821
4822#endif /* MSPACES */
4823
4824
4825/* -------------------- Alternative MORECORE functions ------------------- */
4826
4827/*
4828 Guidelines for creating a custom version of MORECORE:
4829
4830 * For best performance, MORECORE should allocate in multiples of pagesize.
4831 * MORECORE may allocate more memory than requested. (Or even less,
4832 but this will usually result in a malloc failure.)
4833 * MORECORE must not allocate memory when given argument zero, but
4834 instead return one past the end address of memory from previous
4835 nonzero call.
4836 * For best performance, consecutive calls to MORECORE with positive
4837 arguments should return increasing addresses, indicating that
4838 space has been contiguously extended.
4839 * Even though consecutive calls to MORECORE need not return contiguous
4840 addresses, it must be OK for malloc'ed chunks to span multiple
4841 regions in those cases where they do happen to be contiguous.
4842 * MORECORE need not handle negative arguments -- it may instead
4843 just return MFAIL when given negative arguments.
4844 Negative arguments are always multiples of pagesize. MORECORE
4845 must not misinterpret negative args as large positive unsigned
4846 args. You can suppress all such calls from even occurring by defining
4847 MORECORE_CANNOT_TRIM.
4848
4849 As an example alternative MORECORE, here is a custom allocator
4850 kindly contributed for pre-OSX macOS. It uses virtually but not
4851 necessarily physically contiguous non-paged memory (locked in,
4852 present and won't get swapped out). You can use it by uncommenting
4853 this section, adding some #includes, and setting up the appropriate
4854 defines above:
4855
4856 #define MORECORE osMoreCore
4857
4858 There is also a shutdown routine that should somehow be called for
4859 cleanup upon program exit.
4860
4861 #define MAX_POOL_ENTRIES 100
4862 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4863 static int next_os_pool;
4864 void *our_os_pools[MAX_POOL_ENTRIES];
4865
4866 void *osMoreCore(int size)
4867 {
4868 void *ptr = 0;
4869 static void *sbrk_top = 0;
4870
4871 if (size > 0)
4872 {
4873 if (size < MINIMUM_MORECORE_SIZE)
4874 size = MINIMUM_MORECORE_SIZE;
4875 if (CurrentExecutionLevel() == kTaskLevel)
4876 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4877 if (ptr == 0)
4878 {
4879 return (void *) MFAIL;
4880 }
4881 // save ptrs so they can be freed during cleanup
4882 our_os_pools[next_os_pool] = ptr;
4883 next_os_pool++;
4884 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4885 sbrk_top = (char *) ptr + size;
4886 return ptr;
4887 }
4888 else if (size < 0)
4889 {
4890 // we don't currently support shrink behavior
4891 return (void *) MFAIL;
4892 }
4893 else
4894 {
4895 return sbrk_top;
4896 }
4897 }
4898
4899 // cleanup any allocated memory pools
4900 // called as last thing before shutting down driver
4901
4902 void osCleanupMem(void)
4903 {
4904 void **ptr;
4905
4906 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4907 if (*ptr)
4908 {
4909 PoolDeallocate(*ptr);
4910 *ptr = 0;
4911 }
4912 }
4913
4914*/
4915
4916
4917/* -----------------------------------------------------------------------
4918History:
4919 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4920 * fix bad comparison in dlposix_memalign
4921 * don't reuse adjusted asize in sys_alloc
4922 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4923 * reduce compiler warnings -- thanks to all who reported/suggested these
4924
4925 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4926 * Always perform unlink checks unless INSECURE
4927 * Add posix_memalign.
4928 * Improve realloc to expand in more cases; expose realloc_in_place.
4929 Thanks to Peter Buhr for the suggestion.
4930 * Add footprint_limit, inspect_all, bulk_free. Thanks
4931 to Barry Hayes and others for the suggestions.
4932 * Internal refactorings to avoid calls while holding locks
4933 * Use non-reentrant locks by default. Thanks to Roland McGrath
4934 for the suggestion.
4935 * Small fixes to mspace_destroy, reset_on_error.
4936 * Various configuration extensions/changes. Thanks
4937 to all who contributed these.
4938
4939 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4940 * Update Creative Commons URL
4941
4942 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4943 * Use zeros instead of prev foot for is_mmapped
4944 * Add mspace_track_large_chunks; thanks to Jean Brouwers
4945 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4946 * Fix insufficient sys_alloc padding when using 16byte alignment
4947 * Fix bad error check in mspace_footprint
4948 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4949 * Reentrant spin locks; thanks to Earl Chew and others
4950 * Win32 improvements; thanks to Niall Douglas and Earl Chew
4951 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4952 * Extension hook in malloc_state
4953 * Various small adjustments to reduce warnings on some compilers
4954 * Various configuration extensions/changes for more platforms. Thanks
4955 to all who contributed these.
4956
4957 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4958 * Add max_footprint functions
4959 * Ensure all appropriate literals are size_t
4960 * Fix conditional compilation problem for some #define settings
4961 * Avoid concatenating segments with the one provided
4962 in create_mspace_with_base
4963 * Rename some variables to avoid compiler shadowing warnings
4964 * Use explicit lock initialization.
4965 * Better handling of sbrk interference.
4966 * Simplify and fix segment insertion, trimming and mspace_destroy
4967 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4968 * Thanks especially to Dennis Flanagan for help on these.
4969
4970 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4971 * Fix memalign brace error.
4972
4973 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4974 * Fix improper #endif nesting in C++
4975 * Add explicit casts needed for C++
4976
4977 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4978 * Use trees for large bins
4979 * Support mspaces
4980 * Use segments to unify sbrk-based and mmap-based system allocation,
4981 removing need for emulation on most platforms without sbrk.
4982 * Default safety checks
4983 * Optional footer checks. Thanks to William Robertson for the idea.
4984 * Internal code refactoring
4985 * Incorporate suggestions and platform-specific changes.
4986 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4987 Aaron Bachmann, Emery Berger, and others.
4988 * Speed up non-fastbin processing enough to remove fastbins.
4989 * Remove useless cfree() to avoid conflicts with other apps.
4990 * Remove internal memcpy, memset. Compilers handle builtins better.
4991 * Remove some options that no one ever used and rename others.
4992
4993 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4994 * Fix malloc_state bitmap array misdeclaration
4995
4996 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4997 * Allow tuning of FIRST_SORTED_BIN_SIZE
4998 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4999 * Better detection and support for non-contiguousness of MORECORE.
5000 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5001 * Bypass most of malloc if no frees. Thanks To Emery Berger.
5002 * Fix freeing of old top non-contiguous chunk in sysmalloc.
5003 * Raised default trim and map thresholds to 256K.
5004 * Fix mmap-related #defines. Thanks to Lubos Lunak.
5005 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5006 * Branch-free bin calculation
5007 * Default trim and mmap thresholds now 256K.
5008
5009 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5010 * Introduce independent_comalloc and independent_calloc.
5011 Thanks to Michael Pachos for motivation and help.
5012 * Make optional .h file available
5013 * Allow > 2GB requests on 32bit systems.
5014 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5015 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5016 and Anonymous.
5017 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5018 helping test this.)
5019 * memalign: check alignment arg
5020 * realloc: don't try to shift chunks backwards, since this
5021 leads to more fragmentation in some programs and doesn't
5022 seem to help in any others.
5023 * Collect all cases in malloc requiring system memory into sysmalloc
5024 * Use mmap as backup to sbrk
5025 * Place all internal state in malloc_state
5026 * Introduce fastbins (although similar to 2.5.1)
5027 * Many minor tunings and cosmetic improvements
5028 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5029 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5030 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5031 * Include errno.h to support default failure action.
5032
5033 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5034 * return null for negative arguments
5035 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5036 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5037 (e.g. WIN32 platforms)
5038 * Cleanup header file inclusion for WIN32 platforms
5039 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5040 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5041 memory allocation routines
5042 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5043 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5044 usage of 'assert' in non-WIN32 code
5045 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5046 avoid infinite loop
5047 * Always call 'fREe()' rather than 'free()'
5048
5049 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5050 * Fixed ordering problem with boundary-stamping
5051
5052 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5053 * Added pvalloc, as recommended by H.J. Liu
5054 * Added 64bit pointer support mainly from Wolfram Gloger
5055 * Added anonymously donated WIN32 sbrk emulation
5056 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5057 * malloc_extend_top: fix mask error that caused wastage after
5058 foreign sbrks
5059 * Add linux mremap support code from HJ Liu
5060
5061 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5062 * Integrated most documentation with the code.
5063 * Add support for mmap, with help from
5064 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5065 * Use last_remainder in more cases.
5066 * Pack bins using idea from colin@nyx10.cs.du.edu
Paul Vinciguerraec11b132018-09-24 05:25:00 -07005067 * Use ordered bins instead of best-fit threshold
Dave Barach6a5adc32018-07-04 10:56:23 -04005068 * Eliminate block-local decls to simplify tracing and debugging.
5069 * Support another case of realloc via move into top
Paul Vinciguerraec11b132018-09-24 05:25:00 -07005070 * Fix error occurring when initial sbrk_base not word-aligned.
Dave Barach6a5adc32018-07-04 10:56:23 -04005071 * Rely on page size for units instead of SBRK_UNIT to
5072 avoid surprises about sbrk alignment conventions.
5073 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5074 (raymond@es.ele.tue.nl) for the suggestion.
5075 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5076 * More precautions for cases where other routines call sbrk,
5077 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5078 * Added macros etc., allowing use in linux libc from
5079 H.J. Lu (hjl@gnu.ai.mit.edu)
5080 * Inverted this history list
5081
5082 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5083 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5084 * Removed all preallocation code since under current scheme
5085 the work required to undo bad preallocations exceeds
5086 the work saved in good cases for most test programs.
5087 * No longer use return list or unconsolidated bins since
5088 no scheme using them consistently outperforms those that don't
5089 given above changes.
5090 * Use best fit for very large chunks to prevent some worst-cases.
5091 * Added some support for debugging
5092
5093 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5094 * Removed footers when chunks are in use. Thanks to
5095 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5096
5097 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5098 * Added malloc_trim, with help from Wolfram Gloger
5099 (wmglo@Dent.MED.Uni-Muenchen.DE).
5100
5101 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5102
5103 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5104 * realloc: try to expand in both directions
5105 * malloc: swap order of clean-bin strategy;
5106 * realloc: only conditionally expand backwards
5107 * Try not to scavenge used bins
5108 * Use bin counts as a guide to preallocation
5109 * Occasionally bin return list chunks in first scan
5110 * Add a few optimizations from colin@nyx10.cs.du.edu
5111
5112 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5113 * faster bin computation & slightly different binning
5114 * merged all consolidations to one part of malloc proper
5115 (eliminating old malloc_find_space & malloc_clean_bin)
5116 * Scan 2 returns chunks (not just 1)
5117 * Propagate failure in realloc if malloc returns 0
5118 * Add stuff to allow compilation on non-ANSI compilers
5119 from kpv@research.att.com
5120
5121 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5122 * removed potential for odd address access in prev_chunk
5123 * removed dependency on getpagesize.h
5124 * misc cosmetics and a bit more internal documentation
5125 * anticosmetics: mangled names in macros to evade debugger strangeness
5126 * tested on sparc, hp-700, dec-mips, rs6000
5127 with gcc & native cc (hp, dec only) allowing
5128 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5129
5130 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5131 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5132 structure of old version, but most details differ.)
5133
5134*/