| /* |
| This is a version (aka dlmalloc) of malloc/free/realloc written by |
| Doug Lea and released to the public domain, as explained at |
| http://creativecommons.org/publicdomain/zero/1.0/ Send questions, |
| comments, complaints, performance data, etc to dl@cs.oswego.edu |
| */ |
| |
| #include <vppinfra/clib.h> |
| #include <vppinfra/dlmalloc.h> |
| |
| /*------------------------------ internal #includes ---------------------- */ |
| |
| #ifdef _MSC_VER |
| #pragma warning( disable : 4146 ) /* no "unsigned" warnings */ |
| #endif /* _MSC_VER */ |
| #if !NO_MALLOC_STATS |
| #include <stdio.h> /* for printing in malloc_stats */ |
| #endif /* NO_MALLOC_STATS */ |
| #ifndef LACKS_ERRNO_H |
| #include <errno.h> /* for MALLOC_FAILURE_ACTION */ |
| #endif /* LACKS_ERRNO_H */ |
| #ifdef DEBUG |
| #if DLM_ABORT_ON_ASSERT_FAILURE |
| #undef assert |
| #define assert(x) if(!(x)) DLM_ABORT |
| #else /* DLM_ABORT_ON_ASSERT_FAILURE */ |
| #include <assert.h> |
| #endif /* DLM_ABORT_ON_ASSERT_FAILURE */ |
| #else /* DEBUG */ |
| #ifndef assert |
| #define assert(x) |
| #endif |
| #define DEBUG 0 |
| #endif /* DEBUG */ |
| #if !defined(WIN32) && !defined(LACKS_TIME_H) |
| #include <time.h> /* for magic initialization */ |
| #endif /* WIN32 */ |
| #ifndef LACKS_STDLIB_H |
| #include <stdlib.h> /* for abort() */ |
| #endif /* LACKS_STDLIB_H */ |
| #ifndef LACKS_STRING_H |
| #include <string.h> /* for memset etc */ |
| #endif /* LACKS_STRING_H */ |
| #if USE_BUILTIN_FFS |
| #ifndef LACKS_STRINGS_H |
| #include <strings.h> /* for ffs */ |
| #endif /* LACKS_STRINGS_H */ |
| #endif /* USE_BUILTIN_FFS */ |
| #if HAVE_MMAP |
| #ifndef LACKS_SYS_MMAN_H |
| /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ |
| #if (defined(linux) && !defined(__USE_GNU)) |
| #define __USE_GNU 1 |
| #include <sys/mman.h> /* for mmap */ |
| #undef __USE_GNU |
| #else |
| #include <sys/mman.h> /* for mmap */ |
| #endif /* linux */ |
| #endif /* LACKS_SYS_MMAN_H */ |
| #ifndef LACKS_FCNTL_H |
| #include <fcntl.h> |
| #endif /* LACKS_FCNTL_H */ |
| #endif /* HAVE_MMAP */ |
| #ifndef LACKS_UNISTD_H |
| #include <unistd.h> /* for sbrk, sysconf */ |
| #else /* LACKS_UNISTD_H */ |
| #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) |
| extern void* sbrk(ptrdiff_t); |
| #endif /* FreeBSD etc */ |
| #endif /* LACKS_UNISTD_H */ |
| |
| /* Declarations for locking */ |
| #if USE_LOCKS |
| #ifndef WIN32 |
| #if defined (__SVR4) && defined (__sun) /* solaris */ |
| #include <thread.h> |
| #elif !defined(LACKS_SCHED_H) |
| #include <sched.h> |
| #endif /* solaris or LACKS_SCHED_H */ |
| #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS |
| #include <pthread.h> |
| #endif /* USE_RECURSIVE_LOCKS ... */ |
| #elif defined(_MSC_VER) |
| #ifndef _M_AMD64 |
| /* These are already defined on AMD64 builds */ |
| #ifdef __cplusplus |
| extern "C" { |
| #endif /* __cplusplus */ |
| LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); |
| LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); |
| #ifdef __cplusplus |
| } |
| #endif /* __cplusplus */ |
| #endif /* _M_AMD64 */ |
| #pragma intrinsic (_InterlockedCompareExchange) |
| #pragma intrinsic (_InterlockedExchange) |
| #define interlockedcompareexchange _InterlockedCompareExchange |
| #define interlockedexchange _InterlockedExchange |
| #elif defined(WIN32) && defined(__GNUC__) |
| #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) |
| #define interlockedexchange __sync_lock_test_and_set |
| #endif /* Win32 */ |
| #else /* USE_LOCKS */ |
| #endif /* USE_LOCKS */ |
| |
| #ifndef LOCK_AT_FORK |
| #define LOCK_AT_FORK 0 |
| #endif |
| |
| /* Declarations for bit scanning on win32 */ |
| #if defined(_MSC_VER) && _MSC_VER>=1300 |
| #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ |
| #ifdef __cplusplus |
| extern "C" { |
| #endif /* __cplusplus */ |
| unsigned char _BitScanForward(unsigned long *index, unsigned long mask); |
| unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); |
| #ifdef __cplusplus |
| } |
| #endif /* __cplusplus */ |
| |
| #define BitScanForward _BitScanForward |
| #define BitScanReverse _BitScanReverse |
| #pragma intrinsic(_BitScanForward) |
| #pragma intrinsic(_BitScanReverse) |
| #endif /* BitScanForward */ |
| #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ |
| |
| #ifndef WIN32 |
| #ifndef malloc_getpagesize |
| # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
| # ifndef _SC_PAGE_SIZE |
| # define _SC_PAGE_SIZE _SC_PAGESIZE |
| # endif |
| # endif |
| # ifdef _SC_PAGE_SIZE |
| # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
| # else |
| # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
| extern size_t getpagesize(); |
| # define malloc_getpagesize getpagesize() |
| # else |
| # ifdef WIN32 /* use supplied emulation of getpagesize */ |
| # define malloc_getpagesize getpagesize() |
| # else |
| # ifndef LACKS_SYS_PARAM_H |
| # include <sys/param.h> |
| # endif |
| # ifdef EXEC_PAGESIZE |
| # define malloc_getpagesize EXEC_PAGESIZE |
| # else |
| # ifdef NBPG |
| # ifndef CLSIZE |
| # define malloc_getpagesize NBPG |
| # else |
| # define malloc_getpagesize (NBPG * CLSIZE) |
| # endif |
| # else |
| # ifdef NBPC |
| # define malloc_getpagesize NBPC |
| # else |
| # ifdef PAGESIZE |
| # define malloc_getpagesize PAGESIZE |
| # else /* just guess */ |
| # define malloc_getpagesize ((size_t)4096U) |
| # endif |
| # endif |
| # endif |
| # endif |
| # endif |
| # endif |
| # endif |
| #endif |
| #endif |
| |
| /* ------------------- size_t and alignment properties -------------------- */ |
| |
| /* The byte and bit size of a size_t */ |
| #define SIZE_T_SIZE (sizeof(size_t)) |
| #define SIZE_T_BITSIZE (sizeof(size_t) << 3) |
| |
| /* Some constants coerced to size_t */ |
| /* Annoying but necessary to avoid errors on some platforms */ |
| #define SIZE_T_ZERO ((size_t)0) |
| #define SIZE_T_ONE ((size_t)1) |
| #define SIZE_T_TWO ((size_t)2) |
| #define SIZE_T_FOUR ((size_t)4) |
| #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) |
| #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) |
| #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) |
| #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) |
| |
| /* The bit mask value corresponding to MALLOC_ALIGNMENT */ |
| #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) |
| |
| /* True if address a has acceptable alignment */ |
| #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) |
| |
| /* the number of bytes to offset an address to align it */ |
| #define align_offset(A)\ |
| ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ |
| ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) |
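| |
| /* |
| Worked example (illustrative only; assumes MALLOC_ALIGNMENT is 16, as is |
| typical for 64-bit builds -- the actual value comes from dlmalloc.h). |
| CHUNK_ALIGN_MASK is then 0xf, so for an address A whose low bits are 0x4: |
| is_aligned(A) is false, since (A & 0xf) == 0x4 |
| align_offset(A) == (16 - 0x4) & 0xf == 12, and A + 12 is the next |
| 16-byte aligned address |
| align_offset of an already aligned address is 0 |
| */ |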
| |
| /* -------------------------- MMAP preliminaries ------------------------- */ |
| |
| /* |
| If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and |
| checks to fail so compiler optimizer can delete code rather than |
| using so many "#if"s. |
| */ |
| |
| |
| /* MORECORE and MMAP must return MFAIL on failure */ |
| #define MFAIL ((void*)(MAX_SIZE_T)) |
| #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ |
| |
| #if HAVE_MMAP |
| |
| #ifndef WIN32 |
| #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) |
| #define MMAP_PROT (PROT_READ|PROT_WRITE) |
| #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
| #define MAP_ANONYMOUS MAP_ANON |
| #endif /* MAP_ANON */ |
| #ifdef MAP_ANONYMOUS |
| #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) |
| #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) |
| #else /* MAP_ANONYMOUS */ |
| /* |
| Nearly all versions of mmap support MAP_ANONYMOUS, so the following |
| is unlikely to be needed, but is supplied just in case. |
| */ |
| #define MMAP_FLAGS (MAP_PRIVATE) |
| static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ |
| #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ |
| (dev_zero_fd = open("/dev/zero", O_RDWR), \ |
| mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ |
| mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) |
| #endif /* MAP_ANONYMOUS */ |
| |
| #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) |
| |
| #else /* WIN32 */ |
| |
| /* Win32 MMAP via VirtualAlloc */ |
| static FORCEINLINE void* win32mmap(size_t size) { |
| void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); |
| return (ptr != 0)? ptr: MFAIL; |
| } |
| |
| /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ |
| static FORCEINLINE void* win32direct_mmap(size_t size) { |
| void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, |
| PAGE_READWRITE); |
| return (ptr != 0)? ptr: MFAIL; |
| } |
| |
| /* This function supports releasing coalesced segments */ |
| static FORCEINLINE int win32munmap(void* ptr, size_t size) { |
| MEMORY_BASIC_INFORMATION minfo; |
| char* cptr = (char*)ptr; |
| while (size) { |
| if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) |
| return -1; |
| if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || |
| minfo.State != MEM_COMMIT || minfo.RegionSize > size) |
| return -1; |
| if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) |
| return -1; |
| cptr += minfo.RegionSize; |
| size -= minfo.RegionSize; |
| } |
| return 0; |
| } |
| |
| #define MMAP_DEFAULT(s) win32mmap(s) |
| #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) |
| #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) |
| #endif /* WIN32 */ |
| #endif /* HAVE_MMAP */ |
| |
| #if HAVE_MREMAP |
| #ifndef WIN32 |
| #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) |
| #endif /* WIN32 */ |
| #endif /* HAVE_MREMAP */ |
| |
| /** |
| * Define CALL_MORECORE |
| */ |
| #if HAVE_MORECORE |
| #ifdef MORECORE |
| #define CALL_MORECORE(S) MORECORE(S) |
| #else /* MORECORE */ |
| #define CALL_MORECORE(S) MORECORE_DEFAULT(S) |
| #endif /* MORECORE */ |
| #else /* HAVE_MORECORE */ |
| #define CALL_MORECORE(S) MFAIL |
| #endif /* HAVE_MORECORE */ |
| |
| /** |
| * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP |
| */ |
| #if HAVE_MMAP |
| #define USE_MMAP_BIT (SIZE_T_ONE) |
| |
| #ifdef MMAP |
| #define CALL_MMAP(s) MMAP(s) |
| #else /* MMAP */ |
| #define CALL_MMAP(s) MMAP_DEFAULT(s) |
| #endif /* MMAP */ |
| #ifdef MUNMAP |
| #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) |
| #else /* MUNMAP */ |
| #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) |
| #endif /* MUNMAP */ |
| #ifdef DIRECT_MMAP |
| #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) |
| #else /* DIRECT_MMAP */ |
| #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) |
| #endif /* DIRECT_MMAP */ |
| #else /* HAVE_MMAP */ |
| #define USE_MMAP_BIT (SIZE_T_ZERO) |
| |
| #define MMAP(s) MFAIL |
| #define MUNMAP(a, s) (-1) |
| #define DIRECT_MMAP(s) MFAIL |
| #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) |
| #define CALL_MMAP(s) MMAP(s) |
| #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) |
| #endif /* HAVE_MMAP */ |
| |
| /** |
| * Define CALL_MREMAP |
| */ |
| #if HAVE_MMAP && HAVE_MREMAP |
| #ifdef MREMAP |
| #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) |
| #else /* MREMAP */ |
| #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) |
| #endif /* MREMAP */ |
| #else /* HAVE_MMAP && HAVE_MREMAP */ |
| #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL |
| #endif /* HAVE_MMAP && HAVE_MREMAP */ |
| |
| /* mstate bit set if contiguous morecore disabled or failed */ |
| #define USE_NONCONTIGUOUS_BIT (4U) |
| |
| /* mstate bit set if no expansion allowed */ |
| #define USE_NOEXPAND_BIT (8U) |
| |
| /* trace allocations if set */ |
| #define USE_TRACE_BIT (16U) |
| |
| /* segment bit set in create_mspace_with_base */ |
| #define EXTERN_BIT (8U) |
| |
| |
| /* --------------------------- Lock preliminaries ------------------------ */ |
| |
| /* |
| When locks are defined, there is one global lock, plus |
| one per-mspace lock. |
| |
| The global lock ensures that mparams.magic and other unique |
| mparams values are initialized only once. It also protects |
| sequences of calls to MORECORE. In many cases sys_alloc requires |
| two calls that should not be interleaved with calls by other |
| threads. This does not protect against direct calls to MORECORE |
| by other threads not using this lock, so there is still code to |
| cope as best we can with interference. |
| |
| Per-mspace locks surround calls to malloc, free, etc. |
| By default, locks are simple non-reentrant mutexes. |
| |
| Because lock-protected regions generally have bounded times, it is |
| OK to use the supplied simple spinlocks. Spinlocks are likely to |
| improve performance for lightly contended applications, but worsen |
| performance under heavy contention. |
| |
| If USE_LOCKS is > 1, the definitions of lock routines here are |
| bypassed, in which case you will need to define the type MLOCK_T, |
| and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK |
| and TRY_LOCK. You must also declare a |
| static MLOCK_T malloc_global_mutex = { initialization values };. |
| |
| */ |
| |
| #if !USE_LOCKS |
| #define USE_LOCK_BIT (0U) |
| #define INITIAL_LOCK(l) (0) |
| #define DESTROY_LOCK(l) (0) |
| #define ACQUIRE_MALLOC_GLOBAL_LOCK() |
| #define RELEASE_MALLOC_GLOBAL_LOCK() |
| |
| #else |
| #if USE_LOCKS > 1 |
| /* ----------------------- User-defined locks ------------------------ */ |
| /* Define your own lock implementation here */ |
| /* #define INITIAL_LOCK(lk) ... */ |
| /* #define DESTROY_LOCK(lk) ... */ |
| /* #define ACQUIRE_LOCK(lk) ... */ |
| /* #define RELEASE_LOCK(lk) ... */ |
| /* #define TRY_LOCK(lk) ... */ |
| /* static MLOCK_T malloc_global_mutex = ... */ |
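| |
| /* |
| A minimal illustrative sketch of such a user-defined lock layer, assuming |
| a POSIX target (not part of this build; shown only to make the required |
| definitions concrete): |
| |
| #define MLOCK_T pthread_mutex_t |
| #define INITIAL_LOCK(lk) pthread_mutex_init(lk, NULL) |
| #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) |
| #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) |
| #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) |
| #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) |
| static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; |
| */ |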
| |
| #elif USE_SPIN_LOCKS |
| |
| /* First, define CAS_LOCK and CLEAR_LOCK on ints */ |
| /* Note CAS_LOCK defined to return 0 on success */ |
| |
| #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) |
| #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) |
| #define CLEAR_LOCK(sl) __sync_lock_release(sl) |
| |
| #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) |
| /* Custom spin locks for older gcc on x86 */ |
| static FORCEINLINE int x86_cas_lock(int *sl) { |
| int ret; |
| int val = 1; |
| int cmp = 0; |
| __asm__ __volatile__ ("lock; cmpxchgl %1, %2" |
| : "=a" (ret) |
| : "r" (val), "m" (*(sl)), "0"(cmp) |
| : "memory", "cc"); |
| return ret; |
| } |
| |
| static FORCEINLINE void x86_clear_lock(int* sl) { |
| assert(*sl != 0); |
| int prev = 0; |
| int ret; |
| __asm__ __volatile__ ("lock; xchgl %0, %1" |
| : "=r" (ret) |
| : "m" (*(sl)), "0"(prev) |
| : "memory"); |
| } |
| |
| #define CAS_LOCK(sl) x86_cas_lock(sl) |
| #define CLEAR_LOCK(sl) x86_clear_lock(sl) |
| |
| #else /* Win32 MSC */ |
| #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1) |
| #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0) |
| |
| #endif /* ... gcc spin locks ... */ |
| |
| /* How to yield for a spin lock */ |
| #define SPINS_PER_YIELD 63 |
| #if defined(_MSC_VER) |
| #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ |
| #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) |
| #elif defined (__SVR4) && defined (__sun) /* solaris */ |
| #define SPIN_LOCK_YIELD thr_yield(); |
| #elif !defined(LACKS_SCHED_H) |
| #define SPIN_LOCK_YIELD sched_yield(); |
| #else |
| #define SPIN_LOCK_YIELD |
| #endif /* ... yield ... */ |
| |
| #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 |
| /* Plain spin locks use single word (embedded in malloc_states) */ |
| __clib_nosanitize_addr |
| static int spin_acquire_lock(int *sl) { |
| int spins = 0; |
| while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { |
| if ((++spins & SPINS_PER_YIELD) == 0) { |
| SPIN_LOCK_YIELD; |
| } |
| } |
| return 0; |
| } |
| |
| #define MLOCK_T int |
| #define TRY_LOCK(sl) !CAS_LOCK(sl) |
| #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) |
| #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) |
| #define INITIAL_LOCK(sl) (*sl = 0) |
| #define DESTROY_LOCK(sl) (0) |
| static MLOCK_T malloc_global_mutex = 0; |
| |
| #else /* USE_RECURSIVE_LOCKS */ |
| /* types for lock owners */ |
| #ifdef WIN32 |
| #define THREAD_ID_T DWORD |
| #define CURRENT_THREAD GetCurrentThreadId() |
| #define EQ_OWNER(X,Y) ((X) == (Y)) |
| #else |
| /* |
| Note: the following assume that pthread_t is a type that can be |
| initialized to (casted) zero. If this is not the case, you will need to |
| somehow redefine these or not use spin locks. |
| */ |
| #define THREAD_ID_T pthread_t |
| #define CURRENT_THREAD pthread_self() |
| #define EQ_OWNER(X,Y) pthread_equal(X, Y) |
| #endif |
| |
| struct malloc_recursive_lock { |
| int sl; |
| unsigned int c; |
| THREAD_ID_T threadid; |
| }; |
| |
| #define MLOCK_T struct malloc_recursive_lock |
| static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; |
| |
| static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { |
| assert(lk->sl != 0); |
| if (--lk->c == 0) { |
| CLEAR_LOCK(&lk->sl); |
| } |
| } |
| |
| static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { |
| THREAD_ID_T mythreadid = CURRENT_THREAD; |
| int spins = 0; |
| for (;;) { |
| if (*((volatile int *)(&lk->sl)) == 0) { |
| if (!CAS_LOCK(&lk->sl)) { |
| lk->threadid = mythreadid; |
| lk->c = 1; |
| return 0; |
| } |
| } |
| else if (EQ_OWNER(lk->threadid, mythreadid)) { |
| ++lk->c; |
| return 0; |
| } |
| if ((++spins & SPINS_PER_YIELD) == 0) { |
| SPIN_LOCK_YIELD; |
| } |
| } |
| } |
| |
| static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { |
| THREAD_ID_T mythreadid = CURRENT_THREAD; |
| if (*((volatile int *)(&lk->sl)) == 0) { |
| if (!CAS_LOCK(&lk->sl)) { |
| lk->threadid = mythreadid; |
| lk->c = 1; |
| return 1; |
| } |
| } |
| else if (EQ_OWNER(lk->threadid, mythreadid)) { |
| ++lk->c; |
| return 1; |
| } |
| return 0; |
| } |
| |
| #define RELEASE_LOCK(lk) recursive_release_lock(lk) |
| #define TRY_LOCK(lk) recursive_try_lock(lk) |
| #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) |
| #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) |
| #define DESTROY_LOCK(lk) (0) |
| #endif /* USE_RECURSIVE_LOCKS */ |
| |
| #elif defined(WIN32) /* Win32 critical sections */ |
| #define MLOCK_T CRITICAL_SECTION |
| #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) |
| #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) |
| #define TRY_LOCK(lk) TryEnterCriticalSection(lk) |
| #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) |
| #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) |
| #define NEED_GLOBAL_LOCK_INIT |
| |
| static MLOCK_T malloc_global_mutex; |
| static volatile LONG malloc_global_mutex_status; |
| |
| /* Use spin loop to initialize global lock */ |
| static void init_malloc_global_mutex() { |
| for (;;) { |
| long stat = malloc_global_mutex_status; |
| if (stat > 0) |
| return; |
| /* transition to < 0 while initializing, then to > 0 */ |
| if (stat == 0 && |
| interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { |
| InitializeCriticalSection(&malloc_global_mutex); |
| interlockedexchange(&malloc_global_mutex_status, (LONG)1); |
| return; |
| } |
| SleepEx(0, FALSE); |
| } |
| } |
| |
| #else /* pthreads-based locks */ |
| #define MLOCK_T pthread_mutex_t |
| #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) |
| #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) |
| #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) |
| #define INITIAL_LOCK(lk) pthread_init_lock(lk) |
| #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) |
| |
| #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) |
| /* Cope with old-style linux recursive lock initialization by adding */ |
| /* skipped internal declaration from pthread.h */ |
| extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, |
| int __kind)); |
| #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP |
| #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) |
| #endif /* USE_RECURSIVE_LOCKS ... */ |
| |
| static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; |
| |
| static int pthread_init_lock (MLOCK_T *lk) { |
| pthread_mutexattr_t attr; |
| if (pthread_mutexattr_init(&attr)) return 1; |
| #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 |
| if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; |
| #endif |
| if (pthread_mutex_init(lk, &attr)) return 1; |
| if (pthread_mutexattr_destroy(&attr)) return 1; |
| return 0; |
| } |
| |
| #endif /* ... lock types ... */ |
| |
| /* Common code for all lock types */ |
| #define USE_LOCK_BIT (2U) |
| |
| #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK |
| #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); |
| #endif |
| |
| #ifndef RELEASE_MALLOC_GLOBAL_LOCK |
| #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); |
| #endif |
| |
| #endif /* USE_LOCKS */ |
| |
| /* ----------------------- Chunk representations ------------------------ */ |
| |
| /* |
| (The following includes lightly edited explanations by Colin Plumb.) |
| |
| The malloc_chunk declaration below is misleading (but accurate and |
| necessary). It declares a "view" into memory allowing access to |
| necessary fields at known offsets from a given base. |
| |
| Chunks of memory are maintained using a `boundary tag' method as |
| originally described by Knuth. (See the paper by Paul Wilson |
| ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such |
| techniques.) Sizes of free chunks are stored both in the front of |
| each chunk and at the end. This makes consolidating fragmented |
| chunks into bigger chunks fast. The head fields also hold bits |
| representing whether chunks are free or in use. |
| |
| Here are some pictures to make it clearer. They are "exploded" to |
| show that the state of a chunk can be thought of as extending from |
| the high 31 bits of the head field of its header through the |
| prev_foot and PINUSE_BIT bit of the following chunk header. |
| |
| A chunk that's in use looks like: |
| |
| chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Size of previous chunk (if P = 0) | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| | Size of this chunk 1| +-+ |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | | |
| +- -+ |
| | | |
| +- -+ |
| | : |
| +- size - sizeof(size_t) available payload bytes -+ |
| : | |
| chunk-> +- -+ |
| | | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| |
| | Size of next chunk (may or may not be in use) | +-+ |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| |
| And if it's free, it looks like this: |
| |
| chunk-> +- -+ |
| | User payload (must be in use, or we would have merged!) | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| | Size of this chunk 0| +-+ |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Next pointer | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Prev pointer | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | : |
| +- size - sizeof(struct chunk) unused bytes -+ |
| : | |
| chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Size of this chunk | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| |
| | Size of next chunk (must be in use, or we would have merged)| +-+ |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | : |
| +- User payload -+ |
| : | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| |0| |
| +-+ |
| Note that since we always merge adjacent free chunks, the chunks |
| adjacent to a free chunk must be in use. |
| |
| Given a pointer to a chunk (which can be derived trivially from the |
| payload pointer) we can, in O(1) time, find out whether the adjacent |
| chunks are free, and if so, unlink them from the lists that they |
| are on and merge them with the current chunk. |
| |
| Chunks always begin on even word boundaries, so the mem portion |
| (which is returned to the user) is also on an even word boundary, and |
| thus at least double-word aligned. |
| |
| The P (PINUSE_BIT) bit, stored in the unused low-order bit of the |
| chunk size (which is always a multiple of two words), is an in-use |
| bit for the *previous* chunk. If that bit is *clear*, then the |
| word before the current chunk size contains the previous chunk |
| size, and can be used to find the front of the previous chunk. |
| The very first chunk allocated always has this bit set, preventing |
| access to non-existent (or non-owned) memory. If pinuse is set for |
| any given chunk, then you CANNOT determine the size of the |
| previous chunk, and might even get a memory addressing fault when |
| trying to do so. |
| |
| The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of |
| the chunk size redundantly records whether the current chunk is |
| inuse (unless the chunk is mmapped). This redundancy enables usage |
| checks within free and realloc, and reduces indirection when freeing |
| and consolidating chunks. |
| |
| Each freshly allocated chunk must have both cinuse and pinuse set. |
| That is, each allocated chunk borders either a previously allocated |
| and still in-use chunk, or the base of its memory arena. This is |
| ensured by making all allocations from the `lowest' part of any |
| found chunk. Further, no free chunk physically borders another one, |
| so each free chunk is known to be preceded and followed by either |
| inuse chunks or the ends of memory. |
| |
| Note that the `foot' of the current chunk is actually represented |
| as the prev_foot of the NEXT chunk. This makes it easier to |
| deal with alignments etc but can be very confusing when trying |
| to extend or adapt this code. |
| |
| The exceptions to all this are |
| |
| 1. The special chunk `top' is the top-most available chunk (i.e., |
| the one bordering the end of available memory). It is treated |
| specially. Top is never included in any bin, is used only if |
| no other chunk is available, and is released back to the |
| system if it is very large (see M_TRIM_THRESHOLD). In effect, |
| the top chunk is treated as larger (and thus less well |
| fitting) than any other available chunk. The top chunk |
| doesn't update its trailing size field since there is no next |
| contiguous chunk that would have to index off it. However, |
| space is still allocated for it (TOP_FOOT_SIZE) to enable |
| separation or merging when space is extended. |
| |
| 2. Chunks allocated via mmap have both cinuse and pinuse bits |
| cleared in their head fields. Because they are allocated |
| one-by-one, each must carry its own prev_foot field, which is |
| also used to hold the offset this chunk has within its mmapped |
| region, which is needed to preserve alignment. Each mmapped |
| chunk is trailed by the first two fields of a fake next-chunk |
| for sake of usage checks. |
| |
| */ |
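| |
| /* |
| A concrete illustration of the boundary tags above, assuming an 8-byte |
| size_t: a free chunk of 0x60 bytes at address A has head == 0x61 |
| (0x60 | PINUSE_BIT, since the chunk below a free chunk is always in use), |
| while the chunk at A + 0x60 has prev_foot == 0x60 and its own PINUSE_BIT |
| clear, so prev_chunk() recovers A by subtracting prev_foot. |
| */ |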
| |
| struct malloc_chunk { |
| size_t prev_foot; /* Size of previous chunk (if free). */ |
| size_t head; /* Size and inuse bits. */ |
| struct malloc_chunk* fd; /* double links -- used only if free. */ |
| struct malloc_chunk* bk; |
| }; |
| |
| typedef struct malloc_chunk mchunk; |
| typedef struct malloc_chunk* mchunkptr; |
| typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ |
| typedef unsigned int bindex_t; /* Described below */ |
| typedef unsigned int binmap_t; /* Described below */ |
| typedef unsigned int flag_t; /* The type of various bit flag sets */ |
| |
| /* ------------------- Chunks sizes and alignments ----------------------- */ |
| |
| #define MCHUNK_SIZE (sizeof(mchunk)) |
| |
| #if FOOTERS |
| #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
| #else /* FOOTERS */ |
| #define CHUNK_OVERHEAD (SIZE_T_SIZE) |
| #endif /* FOOTERS */ |
| |
| /* MMapped chunks need a second word of overhead ... */ |
| #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
| /* ... and additional padding for fake next-chunk at foot */ |
| #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) |
| |
| /* The smallest size we can malloc is an aligned minimal chunk */ |
| #define MIN_CHUNK_SIZE\ |
| ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
| |
| /* conversion from malloc headers to user pointers, and back */ |
| #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) |
| #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) |
| /* chunk associated with aligned address A */ |
| #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) |
| |
| /* Bounds on request (not chunk) sizes. */ |
| #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) |
| #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) |
| |
| /* pad request bytes into a usable size */ |
| #define pad_request(req) \ |
| (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
| |
| /* pad request, checking for minimum (but not maximum) */ |
| #define request2size(req) \ |
| (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) |
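| |
| /* |
| Worked example (illustrative; assumes a 64-bit build with 16-byte |
| MALLOC_ALIGNMENT and FOOTERS disabled, so CHUNK_OVERHEAD == 8): |
| MCHUNK_SIZE == 32, MIN_CHUNK_SIZE == 32, MIN_REQUEST == 23 |
| request2size(5) == 32 (below MIN_REQUEST, padded up to the minimum) |
| request2size(100) == (100 + 8 + 15) & ~15 == 112 |
| */ |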
| |
| |
| /* ------------------ Operations on head and foot fields ----------------- */ |
| |
| /* |
| The head field of a chunk is or'ed with PINUSE_BIT when the previous |
| adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in |
| use, unless mmapped, in which case both bits are cleared. |
| |
| FLAG4_BIT is not used by this malloc, but might be useful in extensions. |
| */ |
| |
| #define PINUSE_BIT (SIZE_T_ONE) |
| #define CINUSE_BIT (SIZE_T_TWO) |
| #define FLAG4_BIT (SIZE_T_FOUR) |
| #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) |
| #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) |
| |
| /* Head value for fenceposts */ |
| #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) |
| |
| /* extraction of fields from head words */ |
| #define cinuse(p) ((p)->head & CINUSE_BIT) |
| #define pinuse(p) ((p)->head & PINUSE_BIT) |
| #define flag4inuse(p) ((p)->head & FLAG4_BIT) |
| #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) |
| #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) |
| |
| #define chunksize(p) ((p)->head & ~(FLAG_BITS)) |
| |
| #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) |
| #define set_flag4(p) ((p)->head |= FLAG4_BIT) |
| #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) |
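| |
| /* |
| Illustrative decoding of a head word: if head == 0x93, then chunksize() |
| is 0x90 and both cinuse() and pinuse() are set, so is_mmapped() is false. |
| A directly mmapped chunk of the same size carries head == 0x90 with both |
| inuse bits clear, making is_mmapped() true. |
| */ |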
| |
| /* Treat space at ptr +/- offset as a chunk */ |
| #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
| #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) |
| |
| /* Ptr to next or previous physical malloc_chunk. */ |
| #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) |
| #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) |
| |
| /* extract next chunk's pinuse bit */ |
| #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) |
| |
| /* Get/set size at footer */ |
| #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) |
| #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) |
| |
| /* Set size, pinuse bit, and foot */ |
| #define set_size_and_pinuse_of_free_chunk(p, s)\ |
| ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) |
| |
| /* Set size, pinuse bit, foot, and clear next pinuse */ |
| #define set_free_with_pinuse(p, s, n)\ |
| (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) |
| |
| /* Get the internal overhead associated with chunk p */ |
| #define overhead_for(p)\ |
| (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) |
| |
| /* Return true if malloced space is not necessarily cleared */ |
| #if MMAP_CLEARS |
| #define calloc_must_clear(p) (!is_mmapped(p)) |
| #else /* MMAP_CLEARS */ |
| #define calloc_must_clear(p) (1) |
| #endif /* MMAP_CLEARS */ |
| |
| /* ---------------------- Overlaid data structures ----------------------- */ |
| |
| /* |
| When chunks are not in use, they are treated as nodes of either |
| lists or trees. |
| |
| "Small" chunks are stored in circular doubly-linked lists, and look |
| like this: |
| |
| chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Size of previous chunk | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| `head:' | Size of chunk, in bytes |P| |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Forward pointer to next chunk in list | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Back pointer to previous chunk in list | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Unused space (may be 0 bytes long) . |
| . . |
| . | |
| nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| `foot:' | Size of chunk, in bytes | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| |
| Larger chunks are kept in a form of bitwise digital trees (aka |
| tries) keyed on chunksizes. Because malloc_tree_chunks are only for |
| free chunks greater than 256 bytes, their size doesn't impose any |
| constraints on user chunk sizes. Each node looks like: |
| |
| chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Size of previous chunk | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| `head:' | Size of chunk, in bytes |P| |
| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Forward pointer to next chunk of same size | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Back pointer to previous chunk of same size | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Pointer to left child (child[0]) | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Pointer to right child (child[1]) | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Pointer to parent | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | bin index of this chunk | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | Unused space . |
| . | |
| nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| `foot:' | Size of chunk, in bytes | |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| |
| Each tree holding treenodes is a tree of unique chunk sizes. Chunks |
| of the same size are arranged in a circularly-linked list, with only |
| the oldest chunk (the next to be used, in our FIFO ordering) |
| actually in the tree. (Tree members are distinguished by a non-null |
| parent pointer.) If a chunk with the same size as an existing node |
| is inserted, it is linked off the existing node using pointers that |
| work in the same way as fd/bk pointers of small chunks. |
| |
| Each tree contains a power of 2 sized range of chunk sizes (the |
| smallest is 0x100 <= x < 0x180), which is divided in half at each |
| tree level, with the chunks in the smaller half of the range (0x100 |
| <= x < 0x140 for the top node) in the left subtree and the larger |
| half (0x140 <= x < 0x180) in the right subtree. This is, of course, |
| done by inspecting individual bits. |
| |
| Using these rules, each node's left subtree contains all smaller |
| sizes than its right subtree. However, the node at the root of each |
| subtree has no particular ordering relationship to either. (The |
| dividing line between the subtree sizes is based on trie relation.) |
| If we remove the last chunk of a given size from the interior of the |
| tree, we need to replace it with a leaf node. The tree ordering |
| rules permit a node to be replaced by any leaf below it. |
| |
| The smallest chunk in a tree (a common operation in a best-fit |
| allocator) can be found by walking a path to the leftmost leaf in |
| the tree. Unlike a usual binary tree, where we follow left child |
| pointers until we reach a null, here we follow the right child |
| pointer any time the left one is null, until we reach a leaf with |
| both child pointers null. The smallest chunk in the tree will be |
| somewhere along that path. |
| |
| The worst case number of steps to add, find, or remove a node is |
| bounded by the number of bits differentiating chunks within |
| bins. Under current bin calculations, this ranges from 6 up to 21 |
| (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case |
| is of course much better. |
| */ |
| |
| struct malloc_tree_chunk { |
| /* The first four fields must be compatible with malloc_chunk */ |
| size_t prev_foot; |
| size_t head; |
| struct malloc_tree_chunk* fd; |
| struct malloc_tree_chunk* bk; |
| |
| struct malloc_tree_chunk* child[2]; |
| struct malloc_tree_chunk* parent; |
| bindex_t index; |
| }; |
| |
| typedef struct malloc_tree_chunk tchunk; |
| typedef struct malloc_tree_chunk* tchunkptr; |
| typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ |
| |
| /* A little helper macro for trees */ |
| #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) |
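| |
| /* |
| Sketch of the smallest-chunk search described in the overlaid-structures |
| comment above: follow left children when present, else right, tracking |
| the minimum size seen. The function name is hypothetical and not part of |
| this file; the tree-search routines further down do the same walk inline. |
| |
| static tchunkptr smallest_in_tree (tchunkptr t) { |
| tchunkptr v = t; |
| while ((t = leftmost_child (t)) != 0) |
| if (chunksize (t) < chunksize (v)) |
| v = t; |
| return v; |
| } |
| */ |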
| |
| /* ----------------------------- Segments -------------------------------- */ |
| |
| /* |
| Each malloc space may include non-contiguous segments, held in a |
| list headed by an embedded malloc_segment record representing the |
| top-most space. Segments also include flags holding properties of |
| the space. Large chunks that are directly allocated by mmap are not |
| included in this list. They are instead independently created and |
| destroyed without otherwise keeping track of them. |
| |
| Segment management mainly comes into play for spaces allocated by |
| MMAP. Any call to MMAP might or might not return memory that is |
| adjacent to an existing segment. MORECORE normally contiguously |
| extends the current space, so this space is almost always adjacent, |
| which is simpler and faster to deal with. (This is why MORECORE is |
| used preferentially to MMAP when both are available -- see |
| sys_alloc.) When allocating using MMAP, we don't use any of the |
| hinting mechanisms (inconsistently) supported in various |
| implementations of unix mmap, or distinguish reserving from |
| committing memory. Instead, we just ask for space, and exploit |
| contiguity when we get it. It is probably possible to do |
| better than this on some systems, but no general scheme seems |
| to be significantly better. |
| |
| Management entails a simpler variant of the consolidation scheme |
| used for chunks to reduce fragmentation -- new adjacent memory is |
| normally prepended or appended to an existing segment. However, |
| there are limitations compared to chunk consolidation that mostly |
| reflect the fact that segment processing is relatively infrequent |
| (occurring only when getting memory from system) and that we |
| don't expect to have huge numbers of segments: |
| |
| * Segments are not indexed, so traversal requires linear scans. (It |
| would be possible to index these, but is not worth the extra |
| overhead and complexity for most programs on most platforms.) |
| * New segments are only appended to old ones when holding top-most |
| memory; if they cannot be prepended to others, they are held in |
| different segments. |
| |
| Except for the top-most segment of an mstate, each segment record |
| is kept at the tail of its segment. Segments are added by pushing |
| segment records onto the list headed by &mstate.seg for the |
| containing mstate. |
| |
| Segment flags control allocation/merge/deallocation policies: |
| * If EXTERN_BIT set, then we did not allocate this segment, |
| and so should not try to deallocate or merge with others. |
| (This currently holds only for the initial segment passed |
| into create_mspace_with_base.) |
| * If USE_MMAP_BIT set, the segment may be merged with |
| other surrounding mmapped segments and trimmed/de-allocated |
| using munmap. |
| * If neither bit is set, then the segment was obtained using |
| MORECORE so can be merged with surrounding MORECORE'd segments |
| and deallocated/trimmed using MORECORE with negative arguments. |
| */ |
| |
| struct malloc_segment { |
| char* base; /* base address */ |
| size_t size; /* allocated size */ |
| struct malloc_segment* next; /* ptr to next segment */ |
| flag_t sflags; /* mmap and extern flag */ |
| }; |
| |
| #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) |
| #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) |
| |
| typedef struct malloc_segment msegment; |
| typedef struct malloc_segment* msegmentptr; |
| |
| /* ---------------------------- malloc_state ----------------------------- */ |
| |
| /* |
| A malloc_state holds all of the bookkeeping for a space. |
| The main fields are: |
| |
| Top |
| The topmost chunk of the currently active segment. Its size is |
| cached in topsize. The actual size of topmost space is |
| topsize+TOP_FOOT_SIZE, which includes space reserved for adding |
| fenceposts and segment records if necessary when getting more |
| space from the system. The size at which to autotrim top is |
| cached from mparams in trim_check, except that it is disabled if |
| an autotrim fails. |
| |
| Designated victim (dv) |
| This is the preferred chunk for servicing small requests that |
| don't have exact fits. It is normally the chunk split off most |
| recently to service another small request. Its size is cached in |
| dvsize. The link fields of this chunk are not maintained since it |
| is not kept in a bin. |
| |
| SmallBins |
| An array of bin headers for free chunks. These bins hold chunks |
| with sizes less than MIN_LARGE_SIZE bytes. Each bin contains |
| chunks of all the same size, spaced 8 bytes apart. To simplify |
| use in double-linked lists, each bin header acts as a malloc_chunk |
| pointing to the real first node, if it exists (else pointing to |
| itself). This avoids special-casing for headers. But to avoid |
| waste, we allocate only the fd/bk pointers of bins, and then use |
| repositioning tricks to treat these as the fields of a chunk. |
| |
| TreeBins |
| Treebins are pointers to the roots of trees holding a range of |
| sizes. There are 2 equally spaced treebins for each power of two |
| from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything |
| larger. |
| |
| Bin maps |
| There is one bit map for small bins ("smallmap") and one for |
| treebins ("treemap). Each bin sets its bit when non-empty, and |
| clears the bit when empty. Bit operations are then used to avoid |
| bin-by-bin searching -- nearly all "search" is done without ever |
| looking at bins that won't be selected. The bit maps |
| conservatively use 32 bits per map word, even on a 64-bit system. |
| For a good description of some of the bit-based techniques used |
| here, see Henry S. Warren Jr's book "Hacker's Delight" (and |
| supplement at http://hackersdelight.org/). Many of these are |
| intended to reduce the branchiness of paths through malloc etc, as |
| well as to reduce the number of memory locations read or written. |
| |
| Segments |
| A list of segments headed by an embedded malloc_segment record |
| representing the initial space. |
| |
| Address check support |
| The least_addr field is the least address ever obtained from |
| MORECORE or MMAP. Attempted frees and reallocs of any address less |
| than this are trapped (unless INSECURE is defined). |
| |
| Magic tag |
| A cross-check field that should always hold same value as mparams.magic. |
| |
| Max allowed footprint |
| The maximum allowed bytes to allocate from system (zero means no limit) |
| |
| Flags |
| Bits recording whether to use MMAP, locks, or contiguous MORECORE |
| |
| Statistics |
| Each space keeps track of current and maximum system memory |
| obtained via MORECORE or MMAP. |
| |
| Trim support |
| Fields holding the amount of unused topmost memory that should trigger |
| trimming, and a counter to force periodic scanning to release unused |
| non-topmost segments. |
| |
| Locking |
| If USE_LOCKS is defined, the "mutex" lock is acquired and released |
| around every public call using this mspace. |
| |
| Extension support |
| A void* pointer and a size_t field that can be used to help implement |
| extensions to this malloc. |
| */ |
| |
| /* Bin types, widths and sizes */ |
| #define NSMALLBINS (32U) |
| #define NTREEBINS (32U) |
| #define SMALLBIN_SHIFT (3U) |
| #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) |
| #define TREEBIN_SHIFT (8U) |
| #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) |
| #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) |
| #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) |
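| |
| /* |
| Illustrative examples: a 72-byte chunk is "small" -- small_index(72) == 9 |
| and small_index2size(9) == 72, so it lives in smallbin 9. MIN_LARGE_SIZE |
| is 256, so 256 is the first size handled by the treebins: is_small(256) |
| is false because 256 >> SMALLBIN_SHIFT == 32, which is not < NSMALLBINS. |
| */ |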
| |
| struct malloc_state { |
| binmap_t smallmap; |
| binmap_t treemap; |
| size_t dvsize; |
| size_t topsize; |
| char* least_addr; |
| mchunkptr dv; |
| mchunkptr top; |
| size_t trim_check; |
| size_t release_checks; |
| size_t magic; |
| mchunkptr smallbins[(NSMALLBINS+1)*2]; |
| tbinptr treebins[NTREEBINS]; |
| size_t footprint; |
| size_t max_footprint; |
| size_t footprint_limit; /* zero means no limit */ |
| flag_t mflags; |
| #if USE_LOCKS |
| MLOCK_T mutex; /* locate lock among fields that rarely change */ |
| #endif /* USE_LOCKS */ |
| msegment seg; |
| void* extp; /* Unused but available for extensions */ |
| size_t exts; |
| }; |
| |
| typedef struct malloc_state* mstate; |
| |
| /* ------------- Global malloc_state and malloc_params ------------------- */ |
| |
| /* |
| malloc_params holds global properties, including those that can be |
| dynamically set using mallopt. There is a single instance, mparams, |
| initialized in init_mparams. Note that the non-zeroness of "magic" |
| also serves as an initialization flag. |
| */ |
| |
| struct malloc_params { |
| size_t magic; |
| size_t page_size; |
| size_t granularity; |
| size_t mmap_threshold; |
| size_t trim_threshold; |
| flag_t default_mflags; |
| }; |
| |
| static struct malloc_params mparams; |
| |
| /* Ensure mparams initialized */ |
| #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) |
| |
| #if !ONLY_MSPACES |
| |
| /* The global malloc_state used for all non-"mspace" calls */ |
| static struct malloc_state _gm_; |
| #define gm (&_gm_) |
| #define is_global(M) ((M) == &_gm_) |
| |
| #endif /* !ONLY_MSPACES */ |
| |
| #define is_initialized(M) ((M)->top != 0) |
| |
| /* -------------------------- system alloc setup ------------------------- */ |
| |
| /* Operations on mflags */ |
| |
| #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) |
| #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) |
| #if USE_LOCKS |
| #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) |
| #else |
| #define disable_lock(M) |
| #endif |
| |
| #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) |
| #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) |
| #if HAVE_MMAP |
| #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) |
| #else |
| #define disable_mmap(M) |
| #endif |
| |
| #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) |
| #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) |
| #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT) |
| #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT) |
| #define use_trace(M) ((M)->mflags & USE_TRACE_BIT) |
| #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT) |
| #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT) |
| |
| #define set_lock(M,L)\ |
| ((M)->mflags = (L)?\ |
| ((M)->mflags | USE_LOCK_BIT) :\ |
| ((M)->mflags & ~USE_LOCK_BIT)) |
| |
| /* page-align a size */ |
| #define page_align(S)\ |
| (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) |
| |
| /* granularity-align a size */ |
| #define granularity_align(S)\ |
| (((S) + (mparams.granularity - SIZE_T_ONE))\ |
| & ~(mparams.granularity - SIZE_T_ONE)) |
| |
| |
| /* For mmap, use granularity alignment on windows, else page-align */ |
| #ifdef WIN32 |
| #define mmap_align(S) granularity_align(S) |
| #else |
| #define mmap_align(S) page_align(S) |
| #endif |
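| |
| /* |
| Illustrative example, assuming mparams.page_size == 4096: page_align(1) |
| and page_align(4096) both yield 4096, while page_align(4097) yields 8192. |
| granularity_align behaves the same way using mparams.granularity. |
| */ |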
| |
| /* For sys_alloc, enough padding to ensure the request can be malloced on success */ |
| #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) |
| |
| #define is_page_aligned(S)\ |
| (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) |
| #define is_granularity_aligned(S)\ |
| (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) |
| |
| /* True if segment S holds address A */ |
| #define segment_holds(S, A)\ |
| ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) |
| |
| /* Return segment holding given address */ |
| __clib_nosanitize_addr |
| static msegmentptr segment_holding(mstate m, char* addr) { |
| msegmentptr sp = &m->seg; |
| for (;;) { |
| if (addr >= sp->base && addr < sp->base + sp->size) |
| return sp; |
| if ((sp = sp->next) == 0) |
| return 0; |
| } |
| } |
| |
| /* Return true if segment contains a segment link */ |
| __clib_nosanitize_addr |
| static int has_segment_link(mstate m, msegmentptr ss) { |
| msegmentptr sp = &m->seg; |
| for (;;) { |
| if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) |
| return 1; |
| if ((sp = sp->next) == 0) |
| return 0; |
| } |
| } |
| |
| #ifndef MORECORE_CANNOT_TRIM |
| #define should_trim(M,s) ((s) > (M)->trim_check) |
| #else /* MORECORE_CANNOT_TRIM */ |
| #define should_trim(M,s) (0) |
| #endif /* MORECORE_CANNOT_TRIM */ |
| |
| /* |
| TOP_FOOT_SIZE is padding at the end of a segment, including space |
| that may be needed to place segment records and fenceposts when new |
| noncontiguous segments are added. |
| */ |
| #define TOP_FOOT_SIZE\ |
| (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) |
| |
| |
| /* ------------------------------- Hooks -------------------------------- */ |
| |
| /* |
| PREACTION should be defined to return 0 on success, and nonzero on |
| failure. If you are not using locking, you can redefine these to do |
| anything you like. |
| */ |
| |
| #if USE_LOCKS |
| #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) |
| #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } |
| #else /* USE_LOCKS */ |
| |
| #ifndef PREACTION |
| #define PREACTION(M) (0) |
| #endif /* PREACTION */ |
| |
| #ifndef POSTACTION |
| #define POSTACTION(M) |
| #endif /* POSTACTION */ |
| |
| #endif /* USE_LOCKS */ |
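| |
| /* |
| Sketch of how the routines below use these hooks (the function name is |
| hypothetical; the public entry points follow the same pattern): PREACTION |
| acquires the mspace lock and returns 0 on success, POSTACTION releases it. |
| |
| static void example_locked_op (mstate m) { |
| if (!PREACTION (m)) { |
| check_malloc_state (m); |
| POSTACTION (m); |
| } |
| } |
| */ |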
| |
| /* |
| CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. |
| USAGE_ERROR_ACTION is triggered on detected bad frees and |
| reallocs. The argument p is an address that might have triggered the |
| fault. It is ignored by the two predefined actions, but might be |
| useful in custom actions that try to help diagnose errors. |
| */ |
| |
| #if PROCEED_ON_ERROR |
| |
| /* A count of the number of corruption errors causing resets */ |
| int malloc_corruption_error_count; |
| |
| /* default corruption action */ |
| static void reset_on_error(mstate m); |
| |
| #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) |
| #define USAGE_ERROR_ACTION(m, p) |
| |
| #else /* PROCEED_ON_ERROR */ |
| |
| #ifndef CORRUPTION_ERROR_ACTION |
| #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT |
| #endif /* CORRUPTION_ERROR_ACTION */ |
| |
| #ifndef USAGE_ERROR_ACTION |
| #define USAGE_ERROR_ACTION(m,p) DLM_ABORT |
| #endif /* USAGE_ERROR_ACTION */ |
| |
| #endif /* PROCEED_ON_ERROR */ |
| |
| |
| /* -------------------------- Debugging setup ---------------------------- */ |
| |
| #if ! DEBUG |
| |
| #define check_free_chunk(M,P) |
| #define check_inuse_chunk(M,P) |
| #define check_malloced_chunk(M,P,N) |
| #define check_mmapped_chunk(M,P) |
| #define check_malloc_state(M) |
| #define check_top_chunk(M,P) |
| |
| #else /* DEBUG */ |
| #define check_free_chunk(M,P) do_check_free_chunk(M,P) |
| #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) |
| #define check_top_chunk(M,P) do_check_top_chunk(M,P) |
| #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) |
| #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) |
| #define check_malloc_state(M) do_check_malloc_state(M) |
| |
| static void do_check_any_chunk(mstate m, mchunkptr p); |
| static void do_check_top_chunk(mstate m, mchunkptr p); |
| static void do_check_mmapped_chunk(mstate m, mchunkptr p); |
| static void do_check_inuse_chunk(mstate m, mchunkptr p); |
| static void do_check_free_chunk(mstate m, mchunkptr p); |
| static void do_check_malloced_chunk(mstate m, void* mem, size_t s); |
| static void do_check_tree(mstate m, tchunkptr t); |
| static void do_check_treebin(mstate m, bindex_t i); |
| static void do_check_smallbin(mstate m, bindex_t i); |
| static void do_check_malloc_state(mstate m); |
| static int bin_find(mstate m, mchunkptr x); |
| static size_t traverse_and_check(mstate m); |
| #endif /* DEBUG */ |
| |
| /* ---------------------------- Indexing Bins ---------------------------- */ |
| |
| #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) |
| #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) |
| #define small_index2size(i) ((i) << SMALLBIN_SHIFT) |
| #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) |
| |
| /* addressing by index. See above about smallbin repositioning */ |
| #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) |
| #define treebin_at(M,i) (&((M)->treebins[i])) |
| |
| /* assign tree index for size S to variable I. Use x86 asm if possible */ |
| #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) |
| #define compute_tree_index(S, I)\ |
| {\ |
| unsigned int X = S >> TREEBIN_SHIFT;\ |
| if (X == 0)\ |
| I = 0;\ |
| else if (X > 0xFFFF)\ |
| I = NTREEBINS-1;\ |
| else {\ |
| unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ |
| I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
| }\ |
| } |
| |
| #elif defined (__INTEL_COMPILER) |
| #define compute_tree_index(S, I)\ |
| {\ |
| size_t X = S >> TREEBIN_SHIFT;\ |
| if (X == 0)\ |
| I = 0;\ |
| else if (X > 0xFFFF)\ |
| I = NTREEBINS-1;\ |
| else {\ |
| unsigned int K = _bit_scan_reverse (X); \ |
| I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
| }\ |
| } |
| |
| #elif defined(_MSC_VER) && _MSC_VER>=1300 |
| #define compute_tree_index(S, I)\ |
| {\ |
| size_t X = S >> TREEBIN_SHIFT;\ |
| if (X == 0)\ |
| I = 0;\ |
| else if (X > 0xFFFF)\ |
| I = NTREEBINS-1;\ |
| else {\ |
| unsigned int K;\ |
| _BitScanReverse((DWORD *) &K, (DWORD) X);\ |
| I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
| }\ |
| } |
| |
| #else /* GNUC */ |
| #define compute_tree_index(S, I)\ |
| {\ |
| size_t X = S >> TREEBIN_SHIFT;\ |
| if (X == 0)\ |
| I = 0;\ |
| else if (X > 0xFFFF)\ |
| I = NTREEBINS-1;\ |
| else {\ |
| unsigned int Y = (unsigned int)X;\ |
| unsigned int N = ((Y - 0x100) >> 16) & 8;\ |
| unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ |
| N += K;\ |
| N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ |
| K = 14 - N + ((Y <<= K) >> 15);\ |
| I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ |
| }\ |
| } |
| #endif /* GNUC */ |
| |
| /* Bit representing maximum resolved size in a treebin at i */ |
| #define bit_for_tree_index(i) \ |
| (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) |
| |
| /* Shift placing maximum resolved bit in a treebin at i as sign bit */ |
| #define leftshift_for_tree_index(i) \ |
| ((i == NTREEBINS-1)? 0 : \ |
| ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) |
| |
| /* The size of the smallest chunk held in bin with index i */ |
| #define minsize_for_tree_index(i) \ |
| ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ |
| (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) |
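| |
| /* |
| Worked example (independent of which compute_tree_index variant is |
| selected above): for S == 768 (0x300), X == S >> TREEBIN_SHIFT == 3, whose |
| highest set bit is bit 1, so I == (1 << 1) + ((768 >> 8) & 1) == 3. |
| Consistently, minsize_for_tree_index(2) == 512 and |
| minsize_for_tree_index(3) == 768, i.e. treebin 2 holds sizes [512, 768) |
| and treebin 3 holds [768, 1024). |
| */ |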
| |
| |
| /* ------------------------ Operations on bin maps ----------------------- */ |
| |
| /* bit corresponding to given index */ |
| #define idx2bit(i) ((binmap_t)(1) << (i)) |
| |
| /* Mark/Clear bits with given index */ |
| #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) |
| #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) |
| #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) |
| |
| #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) |
| #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) |
| #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) |
| |
| /* isolate the least set bit of a bitmap */ |
| #define least_bit(x) ((x) & -(x)) |
| |
| /* mask with all bits to left of least bit of x on */ |
| #define left_bits(x) ((x<<1) | -(x<<1)) |
| |
| /* mask with all bits to left of or equal to least bit of x on */ |
| #define same_or_left_bits(x) ((x) | -(x)) |
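
/*
  Worked example (binmap_t assumed 32 bits wide): for x = 0x68 (bits 3,
  5 and 6 set), least_bit(x) = 0x08, left_bits(x) = 0xfffffff0 (every
  bit strictly above bit 3), and same_or_left_bits(x) = 0xfffffff8.
*/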
| |
/* index corresponding to given bit. Use compiler builtins/intrinsics if possible */
| |
| #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) |
| #define compute_bit2idx(X, I)\ |
| {\ |
| unsigned int J;\ |
| J = __builtin_ctz(X); \ |
| I = (bindex_t)J;\ |
| } |
| |
| #elif defined (__INTEL_COMPILER) |
| #define compute_bit2idx(X, I)\ |
| {\ |
| unsigned int J;\ |
| J = _bit_scan_forward (X); \ |
| I = (bindex_t)J;\ |
| } |
| |
| #elif defined(_MSC_VER) && _MSC_VER>=1300 |
| #define compute_bit2idx(X, I)\ |
| {\ |
| unsigned int J;\ |
| _BitScanForward((DWORD *) &J, X);\ |
| I = (bindex_t)J;\ |
| } |
| |
| #elif USE_BUILTIN_FFS |
| #define compute_bit2idx(X, I) I = ffs(X)-1 |
| |
| #else |
| #define compute_bit2idx(X, I)\ |
| {\ |
| unsigned int Y = X - 1;\ |
| unsigned int K = Y >> (16-4) & 16;\ |
| unsigned int N = K; Y >>= K;\ |
| N += K = Y >> (8-3) & 8; Y >>= K;\ |
| N += K = Y >> (4-2) & 4; Y >>= K;\ |
| N += K = Y >> (2-1) & 2; Y >>= K;\ |
| N += K = Y >> (1-0) & 1; Y >>= K;\ |
| I = (bindex_t)(N + Y);\ |
| } |
| #endif /* GNUC */ |
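
/*
  Example: compute_bit2idx(0x08, I) sets I = 3 under every variant
  above; the builtin/intrinsic paths count trailing zeros directly,
  and the portable fallback arrives at the same index via the shift
  cascade on Y = X - 1.
*/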
| |
| |
| /* ----------------------- Runtime Check Support ------------------------- */ |
| |
| /* |
| For security, the main invariant is that malloc/free/etc never |
| writes to a static address other than malloc_state, unless static |
| malloc_state itself has been corrupted, which cannot occur via |
| malloc (because of these checks). In essence this means that we |
| believe all pointers, sizes, maps etc held in malloc_state, but |
| check all of those linked or offsetted from other embedded data |
| structures. These checks are interspersed with main code in a way |
| that tends to minimize their run-time cost. |
| |
| When FOOTERS is defined, in addition to range checking, we also |
  verify footer fields of inuse chunks, which can be used to guarantee
| that the mstate controlling malloc/free is intact. This is a |
| streamlined version of the approach described by William Robertson |
| et al in "Run-time Detection of Heap-based Overflows" LISA'03 |
| http://www.usenix.org/events/lisa03/tech/robertson.html The footer |
| of an inuse chunk holds the xor of its mstate and a random seed, |
| that is checked upon calls to free() and realloc(). This is |
| (probabilistically) unguessable from outside the program, but can be |
| computed by any code successfully malloc'ing any chunk, so does not |
| itself provide protection against code that has already broken |
| security through some other means. Unlike Robertson et al, we |
| always dynamically check addresses of all offset chunks (previous, |
| next, etc). This turns out to be cheaper than relying on hashes. |
| */ |
| |
| #if !INSECURE |
| /* Check if address a is at least as high as any from MORECORE or MMAP */ |
| #define ok_address(M, a) ((char*)(a) >= (M)->least_addr) |
| /* Check if address of next chunk n is higher than base chunk p */ |
| #define ok_next(p, n) ((char*)(p) < (char*)(n)) |
| /* Check if p has inuse status */ |
| #define ok_inuse(p) is_inuse(p) |
| /* Check if p has its pinuse bit on */ |
| #define ok_pinuse(p) pinuse(p) |
| |
| #else /* !INSECURE */ |
| #define ok_address(M, a) (1) |
| #define ok_next(b, n) (1) |
| #define ok_inuse(p) (1) |
| #define ok_pinuse(p) (1) |
| #endif /* !INSECURE */ |
| |
| #if (FOOTERS && !INSECURE) |
| /* Check if (alleged) mstate m has expected magic field */ |
| __clib_nosanitize_addr |
| static inline int |
| ok_magic (const mstate m) |
| { |
| return (m->magic == mparams.magic); |
| } |
| #else /* (FOOTERS && !INSECURE) */ |
| #define ok_magic(M) (1) |
| #endif /* (FOOTERS && !INSECURE) */ |
| |
| /* In gcc, use __builtin_expect to minimize impact of checks */ |
| #if !INSECURE |
| #if defined(__GNUC__) && __GNUC__ >= 3 |
| #define RTCHECK(e) __builtin_expect(e, 1) |
| #else /* GNUC */ |
| #define RTCHECK(e) (e) |
| #endif /* GNUC */ |
| #else /* !INSECURE */ |
| #define RTCHECK(e) (1) |
| #endif /* !INSECURE */ |
| |
| /* macros to set up inuse chunks with or without footers */ |
| |
| #if !FOOTERS |
| |
| #define mark_inuse_foot(M,p,s) |
| |
| /* Macros for setting head/foot of non-mmapped chunks */ |
| |
| /* Set cinuse bit and pinuse bit of next chunk */ |
| #define set_inuse(M,p,s)\ |
| ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
| ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
| |
| /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ |
| #define set_inuse_and_pinuse(M,p,s)\ |
| ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
| ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
| |
| /* Set size, cinuse and pinuse bit of this chunk */ |
| #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
| ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) |
| |
| #else /* FOOTERS */ |
| |
| /* Set foot of inuse chunk to be xor of mstate and seed */ |
| #define mark_inuse_foot(M,p,s)\ |
| (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) |
| |
| #define get_mstate_for(p)\ |
| ((mstate)(((mchunkptr)((char*)(p) +\ |
| (chunksize(p))))->prev_foot ^ mparams.magic)) |
| |
| #define set_inuse(M,p,s)\ |
| ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
| (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ |
| mark_inuse_foot(M,p,s)) |
| |
| #define set_inuse_and_pinuse(M,p,s)\ |
| ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
| (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ |
| mark_inuse_foot(M,p,s)) |
| |
| #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
| ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
| mark_inuse_foot(M, p, s)) |
| |
| #endif /* !FOOTERS */ |
| |
| /* ---------------------------- setting mparams -------------------------- */ |
| |
| #if LOCK_AT_FORK |
| static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } |
| static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } |
| static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } |
| #endif /* LOCK_AT_FORK */ |
| |
| /* Initialize mparams */ |
| static int init_mparams(void) { |
| #ifdef NEED_GLOBAL_LOCK_INIT |
| if (malloc_global_mutex_status <= 0) |
| init_malloc_global_mutex(); |
| #endif |
| |
| ACQUIRE_MALLOC_GLOBAL_LOCK(); |
| if (mparams.magic == 0) { |
| size_t magic; |
| size_t psize; |
| size_t gsize; |
| |
| #ifndef WIN32 |
| psize = malloc_getpagesize; |
| gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); |
| #else /* WIN32 */ |
| { |
| SYSTEM_INFO system_info; |
| GetSystemInfo(&system_info); |
| psize = system_info.dwPageSize; |
| gsize = ((DEFAULT_GRANULARITY != 0)? |
| DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); |
| } |
| #endif /* WIN32 */ |
| |
| /* Sanity-check configuration: |
| size_t must be unsigned and as wide as pointer type. |
| ints must be at least 4 bytes. |
| alignment must be at least 8. |
| Alignment, min chunk size, and page size must all be powers of 2. |
| */ |
| if ((sizeof(size_t) != sizeof(char*)) || |
| (MAX_SIZE_T < MIN_CHUNK_SIZE) || |
| (sizeof(int) < 4) || |
| (MALLOC_ALIGNMENT < (size_t)8U) || |
| ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || |
| ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || |
| ((gsize & (gsize-SIZE_T_ONE)) != 0) || |
| ((psize & (psize-SIZE_T_ONE)) != 0)) |
| DLM_ABORT; |
| mparams.granularity = gsize; |
| mparams.page_size = psize; |
| mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
| mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; |
| #if MORECORE_CONTIGUOUS |
| mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; |
| #else /* MORECORE_CONTIGUOUS */ |
| mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; |
| #endif /* MORECORE_CONTIGUOUS */ |
| |
| #if !ONLY_MSPACES |
| /* Set up lock for main malloc area */ |
| gm->mflags = mparams.default_mflags; |
| (void)INITIAL_LOCK(&gm->mutex); |
| #endif |
| #if LOCK_AT_FORK |
| pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); |
| #endif |
| |
| { |
| #ifndef DLM_MAGIC_CONSTANT |
| #if USE_DEV_RANDOM |
| int fd; |
| unsigned char buf[sizeof(size_t)]; |
| /* Try to use /dev/urandom, else fall back on using time */ |
| if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && |
| read(fd, buf, sizeof(buf)) == sizeof(buf)) { |
| magic = *((size_t *) buf); |
| close(fd); |
| } |
| else |
| #endif /* USE_DEV_RANDOM */ |
| #ifdef WIN32 |
| magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); |
| #elif defined(LACKS_TIME_H) |
| magic = (size_t)&magic ^ (size_t)0x55555555U; |
| #else |
| magic = (size_t)(time(0) ^ (size_t)0x55555555U); |
| #endif |
| magic |= (size_t)8U; /* ensure nonzero */ |
| magic &= ~(size_t)7U; /* improve chances of fault for bad values */ |
| #else |
| magic = DLM_MAGIC_CONSTANT; |
| #endif |
      /* Until memory-ordering ops are commonly available, use a volatile write */
| (*(volatile size_t *)(&(mparams.magic))) = magic; |
| } |
| } |
| |
| RELEASE_MALLOC_GLOBAL_LOCK(); |
| return 1; |
| } |
| |
| /* support for mallopt */ |
| static int change_mparam(int param_number, int value) { |
| size_t val; |
| ensure_initialization(); |
| val = (value == -1)? MAX_SIZE_T : (size_t)value; |
| switch(param_number) { |
| case M_TRIM_THRESHOLD: |
| mparams.trim_threshold = val; |
| return 1; |
| case M_GRANULARITY: |
| if (val >= mparams.page_size && ((val & (val-1)) == 0)) { |
| mparams.granularity = val; |
| return 1; |
| } |
| else |
| return 0; |
| case M_MMAP_THRESHOLD: |
| mparams.mmap_threshold = val; |
| return 1; |
| default: |
| return 0; |
| } |
| } |
| |
| #if DEBUG |
| /* ------------------------- Debugging Support --------------------------- */ |
| |
| /* Check properties of any chunk, whether free, inuse, mmapped etc */ |
| static void do_check_any_chunk(mstate m, mchunkptr p) { |
| assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
| assert(ok_address(m, p)); |
| } |
| |
| /* Check properties of top chunk */ |
| static void do_check_top_chunk(mstate m, mchunkptr p) { |
| msegmentptr sp = segment_holding(m, (char*)p); |
| size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ |
| assert(sp != 0); |
| assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
| assert(ok_address(m, p)); |
| assert(sz == m->topsize); |
| assert(sz > 0); |
| assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); |
| assert(pinuse(p)); |
| assert(!pinuse(chunk_plus_offset(p, sz))); |
| } |
| |
| /* Check properties of (inuse) mmapped chunks */ |
| static void do_check_mmapped_chunk(mstate m, mchunkptr p) { |
| size_t sz = chunksize(p); |
| size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); |
| assert(is_mmapped(p)); |
| assert(use_mmap(m)); |
| assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
| assert(ok_address(m, p)); |
| assert(!is_small(sz)); |
| assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); |
| assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); |
| assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); |
| } |
| |
| /* Check properties of inuse chunks */ |
| static void do_check_inuse_chunk(mstate m, mchunkptr p) { |
| do_check_any_chunk(m, p); |
| assert(is_inuse(p)); |
| assert(next_pinuse(p)); |
| /* If not pinuse and not mmapped, previous chunk has OK offset */ |
| assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); |
| if (is_mmapped(p)) |
| do_check_mmapped_chunk(m, p); |
| } |
| |
| /* Check properties of free chunks */ |
| static void do_check_free_chunk(mstate m, mchunkptr p) { |
| size_t sz = chunksize(p); |
| mchunkptr next = chunk_plus_offset(p, sz); |
| do_check_any_chunk(m, p); |
| assert(!is_inuse(p)); |
| assert(!next_pinuse(p)); |
| assert (!is_mmapped(p)); |
| if (p != m->dv && p != m->top) { |
| if (sz >= MIN_CHUNK_SIZE) { |
| assert((sz & CHUNK_ALIGN_MASK) == 0); |
| assert(is_aligned(chunk2mem(p))); |
| assert(next->prev_foot == sz); |
| assert(pinuse(p)); |
| assert (next == m->top || is_inuse(next)); |
| assert(p->fd->bk == p); |
| assert(p->bk->fd == p); |
| } |
| else /* markers are always of size SIZE_T_SIZE */ |
| assert(sz == SIZE_T_SIZE); |
| } |
| } |
| |
| /* Check properties of malloced chunks at the point they are malloced */ |
| static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| size_t sz = p->head & ~INUSE_BITS; |
| do_check_inuse_chunk(m, p); |
| assert((sz & CHUNK_ALIGN_MASK) == 0); |
| assert(sz >= MIN_CHUNK_SIZE); |
| assert(sz >= s); |
| /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ |
| assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); |
| } |
| } |
| |
| /* Check a tree and its subtrees. */ |
| static void do_check_tree(mstate m, tchunkptr t) { |
| tchunkptr head = 0; |
| tchunkptr u = t; |
| bindex_t tindex = t->index; |
| size_t tsize = chunksize(t); |
| bindex_t idx; |
| compute_tree_index(tsize, idx); |
| assert(tindex == idx); |
| assert(tsize >= MIN_LARGE_SIZE); |
| assert(tsize >= minsize_for_tree_index(idx)); |
| assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); |
| |
| do { /* traverse through chain of same-sized nodes */ |
| do_check_any_chunk(m, ((mchunkptr)u)); |
| assert(u->index == tindex); |
| assert(chunksize(u) == tsize); |
| assert(!is_inuse(u)); |
| assert(!next_pinuse(u)); |
| assert(u->fd->bk == u); |
| assert(u->bk->fd == u); |
| if (u->parent == 0) { |
| assert(u->child[0] == 0); |
| assert(u->child[1] == 0); |
| } |
| else { |
| assert(head == 0); /* only one node on chain has parent */ |
| head = u; |
| assert(u->parent != u); |
| assert (u->parent->child[0] == u || |
| u->parent->child[1] == u || |
| *((tbinptr*)(u->parent)) == u); |
| if (u->child[0] != 0) { |
| assert(u->child[0]->parent == u); |
| assert(u->child[0] != u); |
| do_check_tree(m, u->child[0]); |
| } |
| if (u->child[1] != 0) { |
| assert(u->child[1]->parent == u); |
| assert(u->child[1] != u); |
| do_check_tree(m, u->child[1]); |
| } |
| if (u->child[0] != 0 && u->child[1] != 0) { |
| assert(chunksize(u->child[0]) < chunksize(u->child[1])); |
| } |
| } |
| u = u->fd; |
| } while (u != t); |
| assert(head != 0); |
| } |
| |
| /* Check all the chunks in a treebin. */ |
| static void do_check_treebin(mstate m, bindex_t i) { |
| tbinptr* tb = treebin_at(m, i); |
| tchunkptr t = *tb; |
| int empty = (m->treemap & (1U << i)) == 0; |
| if (t == 0) |
| assert(empty); |
| if (!empty) |
| do_check_tree(m, t); |
| } |
| |
| /* Check all the chunks in a smallbin. */ |
| static void do_check_smallbin(mstate m, bindex_t i) { |
| sbinptr b = smallbin_at(m, i); |
| mchunkptr p = b->bk; |
| unsigned int empty = (m->smallmap & (1U << i)) == 0; |
| if (p == b) |
| assert(empty); |
| if (!empty) { |
| for (; p != b; p = p->bk) { |
| size_t size = chunksize(p); |
| mchunkptr q; |
| /* each chunk claims to be free */ |
| do_check_free_chunk(m, p); |
| /* chunk belongs in bin */ |
| assert(small_index(size) == i); |
| assert(p->bk == b || chunksize(p->bk) == chunksize(p)); |
| /* chunk is followed by an inuse chunk */ |
| q = next_chunk(p); |
| if (q->head != FENCEPOST_HEAD) |
| do_check_inuse_chunk(m, q); |
| } |
| } |
| } |
| |
| /* Find x in a bin. Used in other check functions. */ |
| static int bin_find(mstate m, mchunkptr x) { |
| size_t size = chunksize(x); |
| if (is_small(size)) { |
| bindex_t sidx = small_index(size); |
| sbinptr b = smallbin_at(m, sidx); |
| if (smallmap_is_marked(m, sidx)) { |
| mchunkptr p = b; |
| do { |
| if (p == x) |
| return 1; |
| } while ((p = p->fd) != b); |
| } |
| } |
| else { |
| bindex_t tidx; |
| compute_tree_index(size, tidx); |
| if (treemap_is_marked(m, tidx)) { |
| tchunkptr t = *treebin_at(m, tidx); |
| size_t sizebits = size << leftshift_for_tree_index(tidx); |
| while (t != 0 && chunksize(t) != size) { |
| t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
| sizebits <<= 1; |
| } |
| if (t != 0) { |
| tchunkptr u = t; |
| do { |
| if (u == (tchunkptr)x) |
| return 1; |
| } while ((u = u->fd) != t); |
| } |
| } |
| } |
| return 0; |
| } |
| |
| /* Traverse each chunk and check it; return total */ |
| static size_t traverse_and_check(mstate m) { |
| size_t sum = 0; |
| if (is_initialized(m)) { |
| msegmentptr s = &m->seg; |
| sum += m->topsize + TOP_FOOT_SIZE; |
| while (s != 0) { |
| mchunkptr q = align_as_chunk(s->base); |
| mchunkptr lastq = 0; |
| assert(pinuse(q)); |
| while (segment_holds(s, q) && |
| q != m->top && q->head != FENCEPOST_HEAD) { |
| sum += chunksize(q); |
| if (is_inuse(q)) { |
| assert(!bin_find(m, q)); |
| do_check_inuse_chunk(m, q); |
| } |
| else { |
| assert(q == m->dv || bin_find(m, q)); |
| assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ |
| do_check_free_chunk(m, q); |
| } |
| lastq = q; |
| q = next_chunk(q); |
| } |
| s = s->next; |
| } |
| } |
| return sum; |
| } |
| |
| |
| /* Check all properties of malloc_state. */ |
| static void do_check_malloc_state(mstate m) { |
| bindex_t i; |
| size_t total; |
| /* check bins */ |
| for (i = 0; i < NSMALLBINS; ++i) |
| do_check_smallbin(m, i); |
| for (i = 0; i < NTREEBINS; ++i) |
| do_check_treebin(m, i); |
| |
| if (m->dvsize != 0) { /* check dv chunk */ |
| do_check_any_chunk(m, m->dv); |
| assert(m->dvsize == chunksize(m->dv)); |
| assert(m->dvsize >= MIN_CHUNK_SIZE); |
| assert(bin_find(m, m->dv) == 0); |
| } |
| |
| if (m->top != 0) { /* check top chunk */ |
| do_check_top_chunk(m, m->top); |
| /*assert(m->topsize == chunksize(m->top)); redundant */ |
| assert(m->topsize > 0); |
| assert(bin_find(m, m->top) == 0); |
| } |
| |
| total = traverse_and_check(m); |
| assert(total <= m->footprint); |
| assert(m->footprint <= m->max_footprint); |
| } |
| #endif /* DEBUG */ |
| |
| /* ----------------------------- statistics ------------------------------ */ |
| |
| #if !NO_MALLINFO |
| __clib_nosanitize_addr |
| static struct dlmallinfo internal_mallinfo(mstate m) { |
| struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
| ensure_initialization(); |
| if (!PREACTION(m)) { |
| check_malloc_state(m); |
| if (is_initialized(m)) { |
| size_t nfree = SIZE_T_ONE; /* top always free */ |
| size_t mfree = m->topsize + TOP_FOOT_SIZE; |
| size_t sum = mfree; |
| msegmentptr s = &m->seg; |
| while (s != 0) { |
| mchunkptr q = align_as_chunk(s->base); |
| while (segment_holds(s, q) && |
| q != m->top && q->head != FENCEPOST_HEAD) { |
| size_t sz = chunksize(q); |
| sum += sz; |
| if (!is_inuse(q)) { |
| mfree += sz; |
| ++nfree; |
| } |
| q = next_chunk(q); |
| } |
| s = s->next; |
| } |
| |
| nm.arena = sum; |
| nm.ordblks = nfree; |
| nm.hblkhd = m->footprint - sum; |
| nm.usmblks = m->max_footprint; |
| nm.uordblks = m->footprint - mfree; |
| nm.fordblks = mfree; |
| nm.keepcost = m->topsize; |
| } |
| |
| POSTACTION(m); |
| } |
| return nm; |
| } |
| #endif /* !NO_MALLINFO */ |
| |
| #if !NO_MALLOC_STATS |
| static void internal_malloc_stats(mstate m) { |
| ensure_initialization(); |
| if (!PREACTION(m)) { |
| size_t maxfp = 0; |
| size_t fp = 0; |
| size_t used = 0; |
| check_malloc_state(m); |
| if (is_initialized(m)) { |
| msegmentptr s = &m->seg; |
| maxfp = m->max_footprint; |
| fp = m->footprint; |
| used = fp - (m->topsize + TOP_FOOT_SIZE); |
| |
| while (s != 0) { |
| mchunkptr q = align_as_chunk(s->base); |
| while (segment_holds(s, q) && |
| q != m->top && q->head != FENCEPOST_HEAD) { |
| if (!is_inuse(q)) |
| used -= chunksize(q); |
| q = next_chunk(q); |
| } |
| s = s->next; |
| } |
| } |
| POSTACTION(m); /* drop lock */ |
| fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); |
| fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); |
| fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); |
| } |
| } |
| #endif /* NO_MALLOC_STATS */ |
| |
| /* ----------------------- Operations on smallbins ----------------------- */ |
| |
| /* |
  Various forms of linking and unlinking are defined as macros, even
  the ones for trees, which are very long but have very short typical
  paths. This is ugly, but it reduces reliance on compilers' inlining
  support.
| */ |
| |
| /* Link a free chunk into a smallbin */ |
| #define insert_small_chunk(M, P, S) {\ |
| bindex_t I = small_index(S);\ |
| mchunkptr B = smallbin_at(M, I);\ |
| mchunkptr F = B;\ |
| assert(S >= MIN_CHUNK_SIZE);\ |
| if (!smallmap_is_marked(M, I))\ |
| mark_smallmap(M, I);\ |
| else if (RTCHECK(ok_address(M, B->fd)))\ |
| F = B->fd;\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| B->fd = P;\ |
| F->bk = P;\ |
| P->fd = F;\ |
| P->bk = B;\ |
| } |
| |
| /* Unlink a chunk from a smallbin */ |
| #define unlink_small_chunk(M, P, S) {\ |
| mchunkptr F = P->fd;\ |
| mchunkptr B = P->bk;\ |
| bindex_t I = small_index(S);\ |
| assert(P != B);\ |
| assert(P != F);\ |
| assert(chunksize(P) == small_index2size(I));\ |
| if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ |
| if (B == F) {\ |
| clear_smallmap(M, I);\ |
| }\ |
| else if (RTCHECK(B == smallbin_at(M,I) ||\ |
| (ok_address(M, B) && B->fd == P))) {\ |
| F->bk = B;\ |
| B->fd = F;\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| } |
| |
| /* Unlink the first chunk from a smallbin */ |
| #define unlink_first_small_chunk(M, B, P, I) {\ |
| mchunkptr F = P->fd;\ |
| assert(P != B);\ |
| assert(P != F);\ |
| assert(chunksize(P) == small_index2size(I));\ |
| if (B == F) {\ |
| clear_smallmap(M, I);\ |
| }\ |
| else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ |
| F->bk = B;\ |
| B->fd = F;\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| } |
| |
| /* Replace dv node, binning the old one */ |
| /* Used only when dvsize known to be small */ |
| #define replace_dv(M, P, S) {\ |
| size_t DVS = M->dvsize;\ |
| assert(is_small(DVS));\ |
| if (DVS != 0) {\ |
| mchunkptr DV = M->dv;\ |
| insert_small_chunk(M, DV, DVS);\ |
| }\ |
| M->dvsize = S;\ |
| M->dv = P;\ |
| } |
| |
| /* ------------------------- Operations on trees ------------------------- */ |
| |
| /* Insert chunk into tree */ |
| #define insert_large_chunk(M, X, S) {\ |
| tbinptr* H;\ |
| bindex_t I;\ |
| compute_tree_index(S, I);\ |
| H = treebin_at(M, I);\ |
| X->index = I;\ |
| X->child[0] = X->child[1] = 0;\ |
| if (!treemap_is_marked(M, I)) {\ |
| mark_treemap(M, I);\ |
| *H = X;\ |
| X->parent = (tchunkptr)H;\ |
| X->fd = X->bk = X;\ |
| }\ |
| else {\ |
| tchunkptr T = *H;\ |
| size_t K = S << leftshift_for_tree_index(I);\ |
| for (;;) {\ |
| if (chunksize(T) != S) {\ |
| tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ |
| K <<= 1;\ |
| if (*C != 0)\ |
| T = *C;\ |
| else if (RTCHECK(ok_address(M, C))) {\ |
| *C = X;\ |
| X->parent = T;\ |
| X->fd = X->bk = X;\ |
| break;\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| break;\ |
| }\ |
| }\ |
| else {\ |
| tchunkptr F = T->fd;\ |
| if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ |
| T->fd = F->bk = X;\ |
| X->fd = F;\ |
| X->bk = T;\ |
| X->parent = 0;\ |
| break;\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| break;\ |
| }\ |
| }\ |
| }\ |
| }\ |
| } |
| |
| /* |
| Unlink steps: |
| |
| 1. If x is a chained node, unlink it from its same-sized fd/bk links |
| and choose its bk node as its replacement. |
| 2. If x was the last node of its size, but not a leaf node, it must |
| be replaced with a leaf node (not merely one with an open left or |
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
| of x. We could use any other leaf, but this is easy to locate and |
| tends to counteract removal of leftmosts elsewhere, and so keeps |
| paths shorter than minimally guaranteed. This doesn't loop much |
| because on average a node in a tree is near the bottom. |
  3. If x is the base of a chain (i.e., has parent links), relink
| x's parent and children to x's replacement (or null if none). |
| */ |
| |
| #define unlink_large_chunk(M, X) {\ |
| tchunkptr XP = X->parent;\ |
| tchunkptr R;\ |
| if (X->bk != X) {\ |
| tchunkptr F = X->fd;\ |
| R = X->bk;\ |
| if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ |
| F->bk = R;\ |
| R->fd = F;\ |
| }\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| }\ |
| else {\ |
| tchunkptr* RP;\ |
| if (((R = *(RP = &(X->child[1]))) != 0) ||\ |
| ((R = *(RP = &(X->child[0]))) != 0)) {\ |
| tchunkptr* CP;\ |
| while ((*(CP = &(R->child[1])) != 0) ||\ |
| (*(CP = &(R->child[0])) != 0)) {\ |
| R = *(RP = CP);\ |
| }\ |
| if (RTCHECK(ok_address(M, RP)))\ |
| *RP = 0;\ |
| else {\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| }\ |
| }\ |
| if (XP != 0) {\ |
| tbinptr* H = treebin_at(M, X->index);\ |
| if (X == *H) {\ |
| if ((*H = R) == 0) \ |
| clear_treemap(M, X->index);\ |
| }\ |
| else if (RTCHECK(ok_address(M, XP))) {\ |
| if (XP->child[0] == X) \ |
| XP->child[0] = R;\ |
| else \ |
| XP->child[1] = R;\ |
| }\ |
| else\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| if (R != 0) {\ |
| if (RTCHECK(ok_address(M, R))) {\ |
| tchunkptr C0, C1;\ |
| R->parent = XP;\ |
| if ((C0 = X->child[0]) != 0) {\ |
| if (RTCHECK(ok_address(M, C0))) {\ |
| R->child[0] = C0;\ |
| C0->parent = R;\ |
| }\ |
| else\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| if ((C1 = X->child[1]) != 0) {\ |
| if (RTCHECK(ok_address(M, C1))) {\ |
| R->child[1] = C1;\ |
| C1->parent = R;\ |
| }\ |
| else\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| }\ |
| else\ |
| CORRUPTION_ERROR_ACTION(M);\ |
| }\ |
| }\ |
| } |
| |
| /* Relays to large vs small bin operations */ |
| |
| #define insert_chunk(M, P, S)\ |
| if (is_small(S)) insert_small_chunk(M, P, S)\ |
| else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } |
| |
| #define unlink_chunk(M, P, S)\ |
| if (is_small(S)) unlink_small_chunk(M, P, S)\ |
| else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } |
| |
| |
| /* Relays to internal calls to malloc/free from realloc, memalign etc */ |
| |
| #if ONLY_MSPACES |
| #define internal_malloc(m, b) mspace_malloc(m, b) |
| #define internal_free(m, mem) mspace_free(m,mem); |
| #else /* ONLY_MSPACES */ |
| #if MSPACES |
| #define internal_malloc(m, b)\ |
| ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) |
| #define internal_free(m, mem)\ |
| if (m == gm) dlfree(mem); else mspace_free(m,mem); |
| #else /* MSPACES */ |
| #define internal_malloc(m, b) dlmalloc(b) |
| #define internal_free(m, mem) dlfree(mem) |
| #endif /* MSPACES */ |
| #endif /* ONLY_MSPACES */ |
| |
| /* ----------------------- Direct-mmapping chunks ----------------------- */ |
| |
| /* |
| Directly mmapped chunks are set up with an offset to the start of |
| the mmapped region stored in the prev_foot field of the chunk. This |
| allows reconstruction of the required argument to MUNMAP when freed, |
| and also allows adjustment of the returned chunk to meet alignment |
| requirements (especially in memalign). |
| */ |
| |
| /* Malloc using mmap */ |
| static void* mmap_alloc(mstate m, size_t nb) { |
| size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
| if (m->footprint_limit != 0) { |
| size_t fp = m->footprint + mmsize; |
| if (fp <= m->footprint || fp > m->footprint_limit) |
| return 0; |
| } |
| if (mmsize > nb) { /* Check for wrap around 0 */ |
| char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); |
| if (mm != CMFAIL) { |
| size_t offset = align_offset(chunk2mem(mm)); |
| size_t psize = mmsize - offset - MMAP_FOOT_PAD; |
| mchunkptr p = (mchunkptr)(mm + offset); |
| p->prev_foot = offset; |
| p->head = psize; |
| mark_inuse_foot(m, p, psize); |
| chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; |
| chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; |
| |
| if (m->least_addr == 0 || mm < m->least_addr) |
| m->least_addr = mm; |
| if ((m->footprint += mmsize) > m->max_footprint) |
| m->max_footprint = m->footprint; |
| assert(is_aligned(chunk2mem(p))); |
| check_mmapped_chunk(m, p); |
| return chunk2mem(p); |
| } |
| } |
| return 0; |
| } |
| |
| /* Realloc using mmap */ |
| static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { |
| size_t oldsize = chunksize(oldp); |
| (void)flags; /* placate people compiling -Wunused */ |
| if (is_small(nb)) /* Can't shrink mmap regions below small size */ |
| return 0; |
| /* Keep old chunk if big enough but not too big */ |
| if (oldsize >= nb + SIZE_T_SIZE && |
| (oldsize - nb) <= (mparams.granularity << 1)) |
| return oldp; |
| else { |
| size_t offset = oldp->prev_foot; |
| size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; |
| size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
| char* cp = (char*)CALL_MREMAP((char*)oldp - offset, |
| oldmmsize, newmmsize, flags); |
| if (cp != CMFAIL) { |
| mchunkptr newp = (mchunkptr)(cp + offset); |
| size_t psize = newmmsize - offset - MMAP_FOOT_PAD; |
| newp->head = psize; |
| mark_inuse_foot(m, newp, psize); |
| chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; |
| chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; |
| |
| if (cp < m->least_addr) |
| m->least_addr = cp; |
| if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) |
| m->max_footprint = m->footprint; |
| check_mmapped_chunk(m, newp); |
| return newp; |
| } |
| } |
| return 0; |
| } |
| |
| |
| /* -------------------------- mspace management -------------------------- */ |
| |
| /* Initialize top chunk and its size */ |
| __clib_nosanitize_addr |
| static void init_top(mstate m, mchunkptr p, size_t psize) { |
| /* Ensure alignment */ |
| size_t offset = align_offset(chunk2mem(p)); |
| p = (mchunkptr)((char*)p + offset); |
| psize -= offset; |
| |
| m->top = p; |
| m->topsize = psize; |
| p->head = psize | PINUSE_BIT; |
| /* set size of fake trailing chunk holding overhead space only once */ |
| chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; |
| m->trim_check = mparams.trim_threshold; /* reset on each update */ |
| } |
| |
| /* Initialize bins for a new mstate that is otherwise zeroed out */ |
| static void init_bins(mstate m) { |
| /* Establish circular links for smallbins */ |
| bindex_t i; |
| for (i = 0; i < NSMALLBINS; ++i) { |
| sbinptr bin = smallbin_at(m,i); |
| bin->fd = bin->bk = bin; |
| } |
| } |
| |
| #if PROCEED_ON_ERROR |
| |
| /* default corruption action */ |
| static void reset_on_error(mstate m) { |
| int i; |
| ++malloc_corruption_error_count; |
| /* Reinitialize fields to forget about all memory */ |
| m->smallmap = m->treemap = 0; |
| m->dvsize = m->topsize = 0; |
| m->seg.base = 0; |
| m->seg.size = 0; |
| m->seg.next = 0; |
| m->top = m->dv = 0; |
| for (i = 0; i < NTREEBINS; ++i) |
| *treebin_at(m, i) = 0; |
| init_bins(m); |
| } |
| #endif /* PROCEED_ON_ERROR */ |
| |
| /* Allocate chunk and prepend remainder with chunk in successor base. */ |
| __clib_nosanitize_addr |
| static void* prepend_alloc(mstate m, char* newbase, char* oldbase, |
| size_t nb) { |
| mchunkptr p = align_as_chunk(newbase); |
| mchunkptr oldfirst = align_as_chunk(oldbase); |
| size_t psize = (char*)oldfirst - (char*)p; |
| mchunkptr q = chunk_plus_offset(p, nb); |
| size_t qsize = psize - nb; |
| set_size_and_pinuse_of_inuse_chunk(m, p, nb); |
| |
| assert((char*)oldfirst > (char*)q); |
| assert(pinuse(oldfirst)); |
| assert(qsize >= MIN_CHUNK_SIZE); |
| |
| /* consolidate remainder with first chunk of old base */ |
| if (oldfirst == m->top) { |
| size_t tsize = m->topsize += qsize; |
| m->top = q; |
| q->head = tsize | PINUSE_BIT; |
| check_top_chunk(m, q); |
| } |
| else if (oldfirst == m->dv) { |
| size_t dsize = m->dvsize += qsize; |
| m->dv = q; |
| set_size_and_pinuse_of_free_chunk(q, dsize); |
| } |
| else { |
| if (!is_inuse(oldfirst)) { |
| size_t nsize = chunksize(oldfirst); |
| unlink_chunk(m, oldfirst, nsize); |
| oldfirst = chunk_plus_offset(oldfirst, nsize); |
| qsize += nsize; |
| } |
| set_free_with_pinuse(q, qsize, oldfirst); |
| insert_chunk(m, q, qsize); |
| check_free_chunk(m, q); |
| } |
| |
| check_malloced_chunk(m, chunk2mem(p), nb); |
| return chunk2mem(p); |
| } |
| |
| /* Add a segment to hold a new noncontiguous region */ |
| __clib_nosanitize_addr |
| static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { |
| /* Determine locations and sizes of segment, fenceposts, old top */ |
| char* old_top = (char*)m->top; |
| msegmentptr oldsp = segment_holding(m, old_top); |
| char* old_end = oldsp->base + oldsp->size; |
| size_t ssize = pad_request(sizeof(struct malloc_segment)); |
| char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
| size_t offset = align_offset(chunk2mem(rawsp)); |
| char* asp = rawsp + offset; |
| char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; |
| mchunkptr sp = (mchunkptr)csp; |
| msegmentptr ss = (msegmentptr)(chunk2mem(sp)); |
| mchunkptr tnext = chunk_plus_offset(sp, ssize); |
| mchunkptr p = tnext; |
| int __attribute__((unused)) nfences = 0; |
| |
| /* reset top to new space */ |
| init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); |
| |
| /* Set up segment record */ |
| assert(is_aligned(ss)); |
| set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); |
| *ss = m->seg; /* Push current record */ |
| m->seg.base = tbase; |
| m->seg.size = tsize; |
| m->seg.sflags = mmapped; |
| m->seg.next = ss; |
| |
| /* Insert trailing fenceposts */ |
| for (;;) { |
| mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); |
| p->head = FENCEPOST_HEAD; |
| ++nfences; |
| if ((char*)(&(nextp->head)) < old_end) |
| p = nextp; |
| else |
| break; |
| } |
| assert(nfences >= 2); |
| |
| /* Insert the rest of old top into a bin as an ordinary free chunk */ |
| if (csp != old_top) { |
| mchunkptr q = (mchunkptr)old_top; |
| size_t psize = csp - old_top; |
| mchunkptr tn = chunk_plus_offset(q, psize); |
| set_free_with_pinuse(q, psize, tn); |
| insert_chunk(m, q, psize); |
| } |
| |
| check_top_chunk(m, m->top); |
| } |
| |
| /* -------------------------- System allocation -------------------------- */ |
| |
| /* Get memory from system using MORECORE or MMAP */ |
| __clib_nosanitize_addr |
| static void* sys_alloc(mstate m, size_t nb) { |
| char* tbase = CMFAIL; |
| size_t tsize = 0; |
| flag_t mmap_flag = 0; |
| size_t asize; /* allocation size */ |
| |
| ensure_initialization(); |
| |
| if (use_noexpand(m)) |
| return 0; |
| |
| /* Directly map large chunks, but only if already initialized */ |
| if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { |
| void* mem = mmap_alloc(m, nb); |
| if (mem != 0) |
| return mem; |
| } |
| |
| asize = granularity_align(nb + SYS_ALLOC_PADDING); |
| if (asize <= nb) |
| return 0; /* wraparound */ |
| if (m->footprint_limit != 0) { |
| size_t fp = m->footprint + asize; |
| if (fp <= m->footprint || fp > m->footprint_limit) |
| return 0; |
| } |
| |
| /* |
| Try getting memory in any of three ways (in most-preferred to |
| least-preferred order): |
| 1. A call to MORECORE that can normally contiguously extend memory. |
| (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or |
       main space is mmapped or a previous contiguous call failed)
| 2. A call to MMAP new space (disabled if not HAVE_MMAP). |
| Note that under the default settings, if MORECORE is unable to |
| fulfill a request, and HAVE_MMAP is true, then mmap is |
| used as a noncontiguous system allocator. This is a useful backup |
| strategy for systems with holes in address spaces -- in this case |
| sbrk cannot contiguously expand the heap, but mmap may be able to |
| find space. |
| 3. A call to MORECORE that cannot usually contiguously extend memory. |
| (disabled if not HAVE_MORECORE) |
| |
| In all cases, we need to request enough bytes from system to ensure |
| we can malloc nb bytes upon success, so pad with enough space for |
| top_foot, plus alignment-pad to make sure we don't lose bytes if |
| not on boundary, and round this up to a granularity unit. |
| */ |
| |
| if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { |
| char* br = CMFAIL; |
| size_t ssize = asize; /* sbrk call size */ |
| msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); |
| ACQUIRE_MALLOC_GLOBAL_LOCK(); |
| |
| if (ss == 0) { /* First time through or recovery */ |
| char* base = (char*)CALL_MORECORE(0); |
| if (base != CMFAIL) { |
| size_t fp; |
| /* Adjust to end on a page boundary */ |
| if (!is_page_aligned(base)) |
| ssize += (page_align((size_t)base) - (size_t)base); |
| fp = m->footprint + ssize; /* recheck limits */ |
| if (ssize > nb && ssize < HALF_MAX_SIZE_T && |
| (m->footprint_limit == 0 || |
| (fp > m->footprint && fp <= m->footprint_limit)) && |
| (br = (char*)(CALL_MORECORE(ssize))) == base) { |
| tbase = base; |
| tsize = ssize; |
| } |
| } |
| } |
| else { |
| /* Subtract out existing available top space from MORECORE request. */ |
| ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); |
| /* Use mem here only if it did continuously extend old space */ |
| if (ssize < HALF_MAX_SIZE_T && |
| (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { |
| tbase = br; |
| tsize = ssize; |
| } |
| } |
| |
| if (tbase == CMFAIL) { /* Cope with partial failure */ |
| if (br != CMFAIL) { /* Try to use/extend the space we did get */ |
| if (ssize < HALF_MAX_SIZE_T && |
| ssize < nb + SYS_ALLOC_PADDING) { |
| size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); |
| if (esize < HALF_MAX_SIZE_T) { |
| char* end = (char*)CALL_MORECORE(esize); |
| if (end != CMFAIL) |
| ssize += esize; |
| else { /* Can't use; try to release */ |
| (void) CALL_MORECORE(-ssize); |
| br = CMFAIL; |
| } |
| } |
| } |
| } |
| if (br != CMFAIL) { /* Use the space we did get */ |
| tbase = br; |
| tsize = ssize; |
| } |
| else |
| disable_contiguous(m); /* Don't try contiguous path in the future */ |
| } |
| |
| RELEASE_MALLOC_GLOBAL_LOCK(); |
| } |
| |
| if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ |
| char* mp = (char*)(CALL_MMAP(asize)); |
| if (mp != CMFAIL) { |
| tbase = mp; |
| tsize = asize; |
| mmap_flag = USE_MMAP_BIT; |
| } |
| } |
| |
| if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ |
| if (asize < HALF_MAX_SIZE_T) { |
| char* br = CMFAIL; |
| char* end = CMFAIL; |
| ACQUIRE_MALLOC_GLOBAL_LOCK(); |
| br = (char*)(CALL_MORECORE(asize)); |
| end = (char*)(CALL_MORECORE(0)); |
| RELEASE_MALLOC_GLOBAL_LOCK(); |
| if (br != CMFAIL && end != CMFAIL && br < end) { |
| size_t ssize = end - br; |
| if (ssize > nb + TOP_FOOT_SIZE) { |
| tbase = br; |
| tsize = ssize; |
| } |
| } |
| } |
| } |
| |
| if (tbase != CMFAIL) { |
| |
| if ((m->footprint += tsize) > m->max_footprint) |
| m->max_footprint = m->footprint; |
| |
| if (!is_initialized(m)) { /* first-time initialization */ |
| if (m->least_addr == 0 || tbase < m->least_addr) |
| m->least_addr = tbase; |
| m->seg.base = tbase; |
| m->seg.size = tsize; |
| m->seg.sflags = mmap_flag; |
| m->magic = mparams.magic; |
| m->release_checks = MAX_RELEASE_CHECK_RATE; |
| init_bins(m); |
| #if !ONLY_MSPACES |
| if (is_global(m)) |
| init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); |
| else |
| #endif |
| { |
| /* Offset top by embedded malloc_state */ |
| mchunkptr mn = next_chunk(mem2chunk(m)); |
| init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); |
| } |
| } |
| |
| else { |
| /* Try to merge with an existing segment */ |
| msegmentptr sp = &m->seg; |
| /* Only consider most recent segment if traversal suppressed */ |
| while (sp != 0 && tbase != sp->base + sp->size) |
| sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
| if (sp != 0 && |
| !is_extern_segment(sp) && |
| (sp->sflags & USE_MMAP_BIT) == mmap_flag && |
| segment_holds(sp, m->top)) { /* append */ |
| sp->size += tsize; |
| init_top(m, m->top, m->topsize + tsize); |
| } |
| else { |
| if (tbase < m->least_addr) |
| m->least_addr = tbase; |
| sp = &m->seg; |
| while (sp != 0 && sp->base != tbase + tsize) |
| sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
| if (sp != 0 && |
| !is_extern_segment(sp) && |
| (sp->sflags & USE_MMAP_BIT) == mmap_flag) { |
| char* oldbase = sp->base; |
| sp->base = tbase; |
| sp->size += tsize; |
| return prepend_alloc(m, tbase, oldbase, nb); |
| } |
| else |
| add_segment(m, tbase, tsize, mmap_flag); |
| } |
| } |
| |
| if (nb < m->topsize) { /* Allocate from new or extended top space */ |
| size_t rsize = m->topsize -= nb; |
| mchunkptr p = m->top; |
| mchunkptr r = m->top = chunk_plus_offset(p, nb); |
| r->head = rsize | PINUSE_BIT; |
| set_size_and_pinuse_of_inuse_chunk(m, p, nb); |
| check_top_chunk(m, m->top); |
| check_malloced_chunk(m, chunk2mem(p), nb); |
| return chunk2mem(p); |
| } |
| } |
| |
| MALLOC_FAILURE_ACTION; |
| return 0; |
| } |
| |
| /* ----------------------- system deallocation -------------------------- */ |
| |
| /* Unmap and unlink any mmapped segments that don't contain used chunks */ |
| __clib_nosanitize_addr |
| static size_t release_unused_segments(mstate m) { |
| size_t released = 0; |
| int nsegs = 0; |
| msegmentptr pred = &m->seg; |
| msegmentptr sp = pred->next; |
| while (sp != 0) { |
| char* base = sp->base; |
| size_t size = sp->size; |
| msegmentptr next = sp->next; |
| ++nsegs; |
| if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { |
| mchunkptr p = align_as_chunk(base); |
| size_t psize = chunksize(p); |
| /* Can unmap if first chunk holds entire segment and not pinned */ |
| if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { |
| tchunkptr tp = (tchunkptr)p; |
| assert(segment_holds(sp, (char*)sp)); |
| if (p == m->dv) { |
| m->dv = 0; |
| m->dvsize = 0; |
| } |
| else { |
| unlink_large_chunk(m, tp); |
| } |
| if (CALL_MUNMAP(base, size) == 0) { |
| released += size; |
| m->footprint -= size; |
| /* unlink obsoleted record */ |
| sp = pred; |
| sp->next = next; |
| } |
| else { /* back out if cannot unmap */ |
| insert_large_chunk(m, tp, psize); |
| } |
| } |
| } |
| if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ |
| break; |
| pred = sp; |
| sp = next; |
| } |
| /* Reset check counter */ |
| m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? |
| (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); |
| return released; |
| } |
| |
| __clib_nosanitize_addr |
| static int sys_trim(mstate m, size_t pad) { |
| size_t released = 0; |
| ensure_initialization(); |
| if (pad < MAX_REQUEST && is_initialized(m)) { |
| pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ |
| |
| if (m->topsize > pad) { |
| /* Shrink top space in granularity-size units, keeping at least one */ |
| size_t unit = mparams.granularity; |
| size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - |
| SIZE_T_ONE) * unit; |
| msegmentptr sp = segment_holding(m, (char*)m->top); |
| |
| if (!is_extern_segment(sp)) { |
| if (is_mmapped_segment(sp)) { |
| if (HAVE_MMAP && |
| sp->size >= extra && |
| !has_segment_link(m, sp)) { /* can't shrink if pinned */ |
| size_t newsize = sp->size - extra; |
| (void)newsize; /* placate people compiling -Wunused-variable */ |
| /* Prefer mremap, fall back to munmap */ |
| if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || |
| (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { |
| released = extra; |
| } |
| } |
| } |
| else if (HAVE_MORECORE) { |
| if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ |
| extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; |
| ACQUIRE_MALLOC_GLOBAL_LOCK(); |
| { |
| /* Make sure end of memory is where we last set it. */ |
| char* old_br = (char*)(CALL_MORECORE(0)); |
| if (old_br == sp->base + sp->size) { |
| char* rel_br = (char*)(CALL_MORECORE(-extra)); |
| char* new_br = (char*)(CALL_MORECORE(0)); |
| if (rel_br != CMFAIL && new_br < old_br) |
| released = old_br - new_br; |
| } |
| } |
| RELEASE_MALLOC_GLOBAL_LOCK(); |
| } |
| } |
| |
| if (released != 0) { |
| sp->size -= released; |
| m->footprint -= released; |
| init_top(m, m->top, m->topsize - released); |
| check_top_chunk(m, m->top); |
| } |
| } |
| |
| /* Unmap any unused mmapped segments */ |
| if (HAVE_MMAP) |
| released += release_unused_segments(m); |
| |
| /* On failure, disable autotrim to avoid repeated failed future calls */ |
| if (released == 0 && m->topsize > m->trim_check) |
| m->trim_check = MAX_SIZE_T; |
| } |
| |
| return (released != 0)? 1 : 0; |
| } |
| |
| /* Consolidate and bin a chunk. Differs from exported versions |
| of free mainly in that the chunk need not be marked as inuse. |
| */ |
| __clib_nosanitize_addr |
| static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { |
| mchunkptr next = chunk_plus_offset(p, psize); |
| if (!pinuse(p)) { |
| mchunkptr prev; |
| size_t prevsize = p->prev_foot; |
| if (is_mmapped(p)) { |
| psize += prevsize + MMAP_FOOT_PAD; |
| if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
| m->footprint -= psize; |
| return; |
| } |
| prev = chunk_minus_offset(p, prevsize); |
| psize += prevsize; |
| p = prev; |
| if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ |
| if (p != m->dv) { |
| unlink_chunk(m, p, prevsize); |
| } |
| else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
| m->dvsize = psize; |
| set_free_with_pinuse(p, psize, next); |
| return; |
| } |
| } |
| else { |
| CORRUPTION_ERROR_ACTION(m); |
| return; |
| } |
| } |
| if (RTCHECK(ok_address(m, next))) { |
| if (!cinuse(next)) { /* consolidate forward */ |
| if (next == m->top) { |
| size_t tsize = m->topsize += psize; |
| m->top = p; |
| p->head = tsize | PINUSE_BIT; |
| if (p == m->dv) { |
| m->dv = 0; |
| m->dvsize = 0; |
| } |
| return; |
| } |
| else if (next == m->dv) { |
| size_t dsize = m->dvsize += psize; |
| m->dv = p; |
| set_size_and_pinuse_of_free_chunk(p, dsize); |
| return; |
| } |
| else { |
| size_t nsize = chunksize(next); |
| psize += nsize; |
| unlink_chunk(m, next, nsize); |
| set_size_and_pinuse_of_free_chunk(p, psize); |
| if (p == m->dv) { |
| m->dvsize = psize; |
| return; |
| } |
| } |
| } |
| else { |
| set_free_with_pinuse(p, psize, next); |
| } |
| insert_chunk(m, p, psize); |
| } |
| else { |
| CORRUPTION_ERROR_ACTION(m); |
| } |
| } |
| |
| /* ---------------------------- malloc --------------------------- */ |
| |
| /* allocate a large request from the best fitting chunk in a treebin */ |
| __clib_nosanitize_addr |
| static void* tmalloc_large(mstate m, size_t nb) { |
| tchunkptr v = 0; |
| size_t rsize = -nb; /* Unsigned negation */ |
| tchunkptr t; |
| bindex_t idx; |
| compute_tree_index(nb, idx); |
| if ((t = *treebin_at(m, idx)) != 0) { |
| /* Traverse tree for this bin looking for node with size == nb */ |
| size_t sizebits = nb << leftshift_for_tree_index(idx); |
| tchunkptr rst = 0; /* The deepest untaken right subtree */ |
| for (;;) { |
| tchunkptr rt; |
| size_t trem = chunksize(t) - nb; |
| if (trem < rsize) { |
| v = t; |
| if ((rsize = trem) == 0) |
| break; |
| } |
| rt = t->child[1]; |
| t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
| if (rt != 0 && rt != t) |
| rst = rt; |
| if (t == 0) { |
| t = rst; /* set t to least subtree holding sizes > nb */ |
| break; |
| } |
| sizebits <<= 1; |
| } |
| } |
| if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ |
| binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; |
| if (leftbits != 0) { |
| bindex_t i; |
| binmap_t leastbit = least_bit(leftbits); |
| compute_bit2idx(leastbit, i); |
| t = *treebin_at(m, i); |
| } |
| } |
| |
| while (t != 0) { /* find smallest of tree or subtree */ |
| size_t trem = chunksize(t) - nb; |
| if (trem < rsize) { |
| rsize = trem; |
| v = t; |
| } |
| t = leftmost_child(t); |
| } |
| |
| /* If dv is a better fit, return 0 so malloc will use it */ |
| if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { |
| if (RTCHECK(ok_address(m, v))) { /* split */ |
| mchunkptr r = chunk_plus_offset(v, nb); |
| assert(chunksize(v) == rsize + nb); |
| if (RTCHECK(ok_next(v, r))) { |
| unlink_large_chunk(m, v); |
| if (rsize < MIN_CHUNK_SIZE) |
| set_inuse_and_pinuse(m, v, (rsize + nb)); |
| else { |
| set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| insert_chunk(m, r, rsize); |
| } |
| return chunk2mem(v); |
| } |
| } |
| CORRUPTION_ERROR_ACTION(m); |
| } |
| return 0; |
| } |
| |
| /* allocate a small request from the best fitting chunk in a treebin */ |
| __clib_nosanitize_addr |
| static void* tmalloc_small(mstate m, size_t nb) { |
| tchunkptr t, v; |
| size_t rsize; |
| bindex_t i; |
| binmap_t leastbit = least_bit(m->treemap); |
| compute_bit2idx(leastbit, i); |
| v = t = *treebin_at(m, i); |
| rsize = chunksize(t) - nb; |
| |
| while ((t = leftmost_child(t)) != 0) { |
| size_t trem = chunksize(t) - nb; |
| if (trem < rsize) { |
| rsize = trem; |
| v = t; |
| } |
| } |
| |
| if (RTCHECK(ok_address(m, v))) { |
| mchunkptr r = chunk_plus_offset(v, nb); |
| assert(chunksize(v) == rsize + nb); |
| if (RTCHECK(ok_next(v, r))) { |
| unlink_large_chunk(m, v); |
| if (rsize < MIN_CHUNK_SIZE) |
| set_inuse_and_pinuse(m, v, (rsize + nb)); |
| else { |
| set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| replace_dv(m, r, rsize); |
| } |
| return chunk2mem(v); |
| } |
| } |
| |
| CORRUPTION_ERROR_ACTION(m); |
| return 0; |
| } |
| |
| #if !ONLY_MSPACES |
| |
| void* dlmalloc(size_t bytes) { |
| /* |
| Basic algorithm: |
| If a small request (< 256 bytes minus per-chunk overhead): |
| 1. If one exists, use a remainderless chunk in associated smallbin. |
| (Remainderless means that there are too few excess bytes to |
| represent as a chunk.) |
| 2. If it is big enough, use the dv chunk, which is normally the |
| chunk adjacent to the one used for the most recent small request. |
| 3. If one exists, split the smallest available chunk in a bin, |
| saving remainder in dv. |
| 4. If it is big enough, use the top chunk. |
| 5. If available, get memory from system and use it |
| Otherwise, for a large request: |
| 1. Find the smallest available binned chunk that fits, and use it |
| if it is better fitting than dv chunk, splitting if necessary. |
| 2. If better fitting than any binned chunk, use the dv chunk. |
| 3. If it is big enough, use the top chunk. |
| 4. If request size >= mmap threshold, try to directly mmap this chunk. |
| 5. If available, get memory from system and use it |
| |
| The ugly goto's here ensure that postaction occurs along all paths. |
| */ |
| |
| #if USE_LOCKS |
| ensure_initialization(); /* initialize in sys_alloc if not using locks */ |
| #endif |
| |
| if (!PREACTION(gm)) { |
| void* mem; |
| size_t nb; |
| if (bytes <= MAX_SMALL_REQUEST) { |
| bindex_t idx; |
| binmap_t smallbits; |
| nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
| idx = small_index(nb); |
| smallbits = gm->smallmap >> idx; |
| |
| if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ |
| mchunkptr b, p; |
| idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
| b = smallbin_at(gm, idx); |
| p = b->fd; |
| assert(chunksize(p) == small_index2size(idx)); |
| unlink_first_small_chunk(gm, b, p, idx); |
| set_inuse_and_pinuse(gm, p, small_index2size(idx)); |
| mem = chunk2mem(p); |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| |
| else if (nb > gm->dvsize) { |
| if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ |
| mchunkptr b, p, r; |
| size_t rsize; |
| bindex_t i; |
| binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); |
| binmap_t leastbit = least_bit(leftbits); |
| compute_bit2idx(leastbit, i); |
| b = smallbin_at(gm, i); |
| p = b->fd; |
| assert(chunksize(p) == small_index2size(i)); |
| unlink_first_small_chunk(gm, b, p, i); |
| rsize = small_index2size(i) - nb; |
        /* Fit here cannot be remainderless if 4-byte sizes */
| if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
| set_inuse_and_pinuse(gm, p, small_index2size(i)); |
| else { |
| set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
| r = chunk_plus_offset(p, nb); |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| replace_dv(gm, r, rsize); |
| } |
| mem = chunk2mem(p); |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| |
| else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| } |
| } |
| else if (bytes >= MAX_REQUEST) |
| nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
| else { |
| nb = pad_request(bytes); |
| if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| } |
| |
| if (nb <= gm->dvsize) { |
| size_t rsize = gm->dvsize - nb; |
| mchunkptr p = gm->dv; |
| if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ |
| mchunkptr r = gm->dv = chunk_plus_offset(p, nb); |
| gm->dvsize = rsize; |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
| } |
| else { /* exhaust dv */ |
| size_t dvs = gm->dvsize; |
| gm->dvsize = 0; |
| gm->dv = 0; |
| set_inuse_and_pinuse(gm, p, dvs); |
| } |
| mem = chunk2mem(p); |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| |
| else if (nb < gm->topsize) { /* Split top */ |
| size_t rsize = gm->topsize -= nb; |
| mchunkptr p = gm->top; |
| mchunkptr r = gm->top = chunk_plus_offset(p, nb); |
| r->head = rsize | PINUSE_BIT; |
| set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
| mem = chunk2mem(p); |
| check_top_chunk(gm, gm->top); |
| check_malloced_chunk(gm, mem, nb); |
| goto postaction; |
| } |
| |
| mem = sys_alloc(gm, nb); |
| |
| postaction: |
| POSTACTION(gm); |
| return mem; |
| } |
| |
| return 0; |
| } |
| |
| /* ---------------------------- free --------------------------- */ |
| |
| void dlfree(void* mem) { |
| /* |
| Consolidate freed chunks with preceding or succeeding bordering |
| free chunks, if they exist, and then place in a bin. Intermixed |
| with special cases for top, dv, mmapped chunks, and usage errors. |
| */ |
| |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| #if FOOTERS |
| mstate fm = get_mstate_for(p); |
| if (!ok_magic(fm)) { |
| USAGE_ERROR_ACTION(fm, p); |
| return; |
| } |
| #else /* FOOTERS */ |
| #define fm gm |
| #endif /* FOOTERS */ |
| if (!PREACTION(fm)) { |
| check_inuse_chunk(fm, p); |
| if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { |
| size_t psize = chunksize(p); |
| mchunkptr next = chunk_plus_offset(p, psize); |
| if (!pinuse(p)) { |
| size_t prevsize = p->prev_foot; |
| if (is_mmapped(p)) { |
| psize += prevsize + MMAP_FOOT_PAD; |
| if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
| fm->footprint -= psize; |
| goto postaction; |
| } |
| else { |
| mchunkptr prev = chunk_minus_offset(p, prevsize); |
| psize += prevsize; |
| p = prev; |
| if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ |
| if (p != fm->dv) { |
| unlink_chunk(fm, p, prevsize); |
| } |
| else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
| fm->dvsize = psize; |
| set_free_with_pinuse(p, psize, next); |
| goto postaction; |
| } |
| } |
| else |
| goto erroraction; |
| } |
| } |
| |
| if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { |
| if (!cinuse(next)) { /* consolidate forward */ |
| if (next == fm->top) { |
| size_t tsize = fm->topsize += psize; |
| fm->top = p; |
| p->head = tsize | PINUSE_BIT; |
| if (p == fm->dv) { |
| fm->dv = 0; |
| fm->dvsize = 0; |
| } |
| if (should_trim(fm, tsize)) |
| sys_trim(fm, 0); |
| goto postaction; |
| } |
| else if (next == fm->dv) { |
| size_t dsize = fm->dvsize += psize; |
| fm->dv = p; |
| set_size_and_pinuse_of_free_chunk(p, dsize); |
| goto postaction; |
| } |
| else { |
| size_t nsize = chunksize(next); |
| psize += nsize; |
| unlink_chunk(fm, next, nsize); |
| set_size_and_pinuse_of_free_chunk(p, psize); |
| if (p == fm->dv) { |
| fm->dvsize = psize; |
| goto postaction; |
| } |
| } |
| } |
| else |
| set_free_with_pinuse(p, psize, next); |
| |
| if (is_small(psize)) { |
| insert_small_chunk(fm, p, psize); |
| check_free_chunk(fm, p); |
| } |
| else { |
| tchunkptr tp = (tchunkptr)p; |
| insert_large_chunk(fm, tp, psize); |
| check_free_chunk(fm, p); |
| if (--fm->release_checks == 0) |
| release_unused_segments(fm); |
| } |
| goto postaction; |
| } |
| } |
| erroraction: |
| USAGE_ERROR_ACTION(fm, p); |
| postaction: |
| POSTACTION(fm); |
| } |
| } |
| #if !FOOTERS |
| #undef fm |
| #endif /* FOOTERS */ |
| } |
| |
| void* dlcalloc(size_t n_elements, size_t elem_size) { |
| void* mem; |
| size_t req = 0; |
| if (n_elements != 0) { |
| req = n_elements * elem_size; |
| if (((n_elements | elem_size) & ~(size_t)0xffff) && |
| (req / n_elements != elem_size)) |
| req = MAX_SIZE_T; /* force downstream failure on overflow */ |
| } |
| mem = dlmalloc(req); |
| if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
| memset(mem, 0, req); |
| return mem; |
| } |
| |
| #endif /* !ONLY_MSPACES */ |
| |
| /* ------------ Internal support for realloc, memalign, etc -------------- */ |
| |
| /* Try to realloc; resize only in place unless can_move is true */ |
| static __clib_nosanitize_addr mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, |
| int can_move) { |
| mchunkptr newp = 0; |
| size_t oldsize = chunksize(p); |
| mchunkptr next = chunk_plus_offset(p, oldsize); |
| if (RTCHECK(ok_address(m, p) && ok_inuse(p) && |
| ok_next(p, next) && ok_pinuse(next))) { |
| if (is_mmapped(p)) { |
| newp = mmap_resize(m, p, nb, can_move); |
| } |
| else if (oldsize >= nb) { /* already big enough */ |
| size_t rsize = oldsize - nb; |
| if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ |
| mchunkptr r = chunk_plus_offset(p, nb); |
| set_inuse(m, p, nb); |
| set_inuse(m, r, rsize); |
| dispose_chunk(m, r, rsize); |
| } |
| newp = p; |
| } |
| else if (next == m->top) { /* extend into top */ |
| if (oldsize + m->topsize > nb) { |
| size_t newsize = oldsize + m->topsize; |
| size_t newtopsize = newsize - nb; |
| mchunkptr newtop = chunk_plus_offset(p, nb); |
| set_inuse(m, p, nb); |
| newtop->head = newtopsize |PINUSE_BIT; |
| m->top = newtop; |
| m->topsize = newtopsize; |
| newp = p; |
| } |
| } |
| else if (next == m->dv) { /* extend into dv */ |
| size_t dvs = m->dvsize; |
| if (oldsize + dvs >= nb) { |
| size_t dsize = oldsize + dvs - nb; |
| if (dsize >= MIN_CHUNK_SIZE) { |
| mchunkptr r = chunk_plus_offset(p, nb); |
| mchunkptr n = chunk_plus_offset(r, dsize); |
| set_inuse(m, p, nb); |
| set_size_and_pinuse_of_free_chunk(r, dsize); |
| clear_pinuse(n); |
| m->dvsize = dsize; |
| m->dv = r; |
| } |
| else { /* exhaust dv */ |
| size_t newsize = oldsize + dvs; |
| set_inuse(m, p, newsize); |
| m->dvsize = 0; |
| m->dv = 0; |
| } |
| newp = p; |
| } |
| } |
| else if (!cinuse(next)) { /* extend into next free chunk */ |
| size_t nextsize = chunksize(next); |
| if (oldsize + nextsize >= nb) { |
| size_t rsize = oldsize + nextsize - nb; |
| unlink_chunk(m, next, nextsize); |
| if (rsize < MIN_CHUNK_SIZE) { |
| size_t newsize = oldsize + nextsize; |
| set_inuse(m, p, newsize); |
| } |
| else { |
| mchunkptr r = chunk_plus_offset(p, nb); |
| set_inuse(m, p, nb); |
| set_inuse(m, r, rsize); |
| dispose_chunk(m, r, rsize); |
| } |
| newp = p; |
| } |
| } |
| } |
| else { |
| USAGE_ERROR_ACTION(m, chunk2mem(p)); |
| } |
| return newp; |
| } |
| |
| __clib_nosanitize_addr |
| static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { |
| void* mem = 0; |
| if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ |
| alignment = MIN_CHUNK_SIZE; |
| if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ |
| size_t a = MALLOC_ALIGNMENT << 1; |
| while (a < alignment) a <<= 1; |
| alignment = a; |
| } |
| if (bytes >= MAX_REQUEST - alignment) { |
| if (m != 0) { /* Test isn't needed but avoids compiler warning */ |
| MALLOC_FAILURE_ACTION; |
| } |
| } |
| else { |
| size_t nb = request2size(bytes); |
| size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; |
| mem = internal_malloc(m, req); |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| if (PREACTION(m)) |
| return 0; |
| if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ |
| /* |
| Find an aligned spot inside chunk. Since we need to give |
| back leading space in a chunk of at least MIN_CHUNK_SIZE, if |
| the first calculation places us at a spot with less than |
| MIN_CHUNK_SIZE leader, we can move to the next aligned spot. |
| We've allocated enough total room so that this is always |
| possible. |
| */ |
| char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - |
| SIZE_T_ONE)) & |
| -alignment)); |
| char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? |
| br : br+alignment; |
| mchunkptr newp = (mchunkptr)pos; |
| size_t leadsize = pos - (char*)(p); |
| size_t newsize = chunksize(p) - leadsize; |
| |
| if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ |
| newp->prev_foot = p->prev_foot + leadsize; |
| newp->head = newsize; |
| } |
| else { /* Otherwise, give back leader, use the rest */ |
| set_inuse(m, newp, newsize); |
| set_inuse(m, p, leadsize); |
| dispose_chunk(m, p, leadsize); |
| } |
| p = newp; |
| } |
| |
| /* Give back spare room at the end */ |
| if (!is_mmapped(p)) { |
| size_t size = chunksize(p); |
| if (size > nb + MIN_CHUNK_SIZE) { |
| size_t remainder_size = size - nb; |
| mchunkptr remainder = chunk_plus_offset(p, nb); |
| set_inuse(m, p, nb); |
| set_inuse(m, remainder, remainder_size); |
| dispose_chunk(m, remainder, remainder_size); |
| } |
| } |
| |
| mem = chunk2mem(p); |
| assert (chunksize(p) >= nb); |
| assert(((size_t)mem & (alignment - 1)) == 0); |
| check_inuse_chunk(m, p); |
| POSTACTION(m); |
| } |
| } |
| return mem; |
| } |
| |
| /* |
| Common support for independent_X routines, handling |
| all of the combinations that can result. |
| The opts arg has: |
| bit 0 set if all elements are same size (using sizes[0]) |
| bit 1 set if elements should be zeroed |
| */ |
| static void** ialloc(mstate m, |
| size_t n_elements, |
| size_t* sizes, |
| int opts, |
| void* chunks[]) { |
| |
| size_t element_size; /* chunksize of each element, if all same */ |
| size_t contents_size; /* total size of elements */ |
| size_t array_size; /* request size of pointer array */ |
| void* mem; /* malloced aggregate space */ |
| mchunkptr p; /* corresponding chunk */ |
| size_t remainder_size; /* remaining bytes while splitting */ |
| void** marray; /* either "chunks" or malloced ptr array */ |
| mchunkptr array_chunk; /* chunk for malloced ptr array */ |
| flag_t was_enabled; /* to disable mmap */ |
| size_t size; |
| size_t i; |
| |
| ensure_initialization(); |
| /* compute array length, if needed */ |
| if (chunks != 0) { |
| if (n_elements == 0) |
| return chunks; /* nothing to do */ |
| marray = chunks; |
| array_size = 0; |
| } |
| else { |
| /* if empty req, must still return chunk representing empty array */ |
| if (n_elements == 0) |
| return (void**)internal_malloc(m, 0); |
| marray = 0; |
| array_size = request2size(n_elements * (sizeof(void*))); |
| } |
| |
| /* compute total element size */ |
| if (opts & 0x1) { /* all-same-size */ |
| element_size = request2size(*sizes); |
| contents_size = n_elements * element_size; |
| } |
| else { /* add up all the sizes */ |
| element_size = 0; |
| contents_size = 0; |
| for (i = 0; i != n_elements; ++i) |
| contents_size += request2size(sizes[i]); |
| } |
| |
| size = contents_size + array_size; |
| |
| /* |
| Allocate the aggregate chunk. First disable direct-mmapping so |
| malloc won't use it, since we would not be able to later |
| free/realloc space internal to a segregated mmap region. |
| */ |
| was_enabled = use_mmap(m); |
| disable_mmap(m); |
| mem = internal_malloc(m, size - CHUNK_OVERHEAD); |
| if (was_enabled) |
| enable_mmap(m); |
| if (mem == 0) |
| return 0; |
| |
| if (PREACTION(m)) return 0; |
| p = mem2chunk(mem); |
| remainder_size = chunksize(p); |
| |
| assert(!is_mmapped(p)); |
| |
| if (opts & 0x2) { /* optionally clear the elements */ |
| memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); |
| } |
| |
| /* If not provided, allocate the pointer array as final part of chunk */ |
| if (marray == 0) { |
| size_t array_chunk_size; |
| array_chunk = chunk_plus_offset(p, contents_size); |
| array_chunk_size = remainder_size - contents_size; |
| marray = (void**) (chunk2mem(array_chunk)); |
| set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); |
| remainder_size = contents_size; |
| } |
| |
| /* split out elements */ |
| for (i = 0; ; ++i) { |
| marray[i] = chunk2mem(p); |
| if (i != n_elements-1) { |
| if (element_size != 0) |
| size = element_size; |
| else |
| size = request2size(sizes[i]); |
| remainder_size -= size; |
| set_size_and_pinuse_of_inuse_chunk(m, p, size); |
| p = chunk_plus_offset(p, size); |
| } |
| else { /* the final element absorbs any overallocation slop */ |
| set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); |
| break; |
| } |
| } |
| |
| #if DEBUG |
| if (marray != chunks) { |
| /* final element must have exactly exhausted chunk */ |
| if (element_size != 0) { |
| assert(remainder_size == element_size); |
| } |
| else { |
| assert(remainder_size == request2size(sizes[i])); |
| } |
| check_inuse_chunk(m, mem2chunk(marray)); |
| } |
| for (i = 0; i != n_elements; ++i) |
| check_inuse_chunk(m, mem2chunk(marray[i])); |
| |
| #endif /* DEBUG */ |
| |
| POSTACTION(m); |
| return marray; |
| } |
| |
| /* Try to free all pointers in the given array. |
|    Note: this could be made faster by delaying consolidation, at the |
|    price of disabling some user integrity checks. We still optimize |
|    some consolidations by combining adjacent chunks before freeing, |
|    which will occur often if the pointers were allocated with ialloc |
|    or the array is sorted. |
| */ |
| static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { |
| size_t unfreed = 0; |
| if (!PREACTION(m)) { |
| void** a; |
| void** fence = &(array[nelem]); |
| for (a = array; a != fence; ++a) { |
| void* mem = *a; |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| size_t psize = chunksize(p); |
| #if FOOTERS |
| if (get_mstate_for(p) != m) { |
| ++unfreed; |
| continue; |
| } |
| #endif |
| check_inuse_chunk(m, p); |
| *a = 0; |
| if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { |
| void ** b = a + 1; /* try to merge with next chunk */ |
| mchunkptr next = next_chunk(p); |
| if (b != fence && *b == chunk2mem(next)) { |
| size_t newsize = chunksize(next) + psize; |
| set_inuse(m, p, newsize); |
| *b = chunk2mem(p); |
| } |
| else |
| dispose_chunk(m, p, psize); |
| } |
| else { |
| CORRUPTION_ERROR_ACTION(m); |
| break; |
| } |
| } |
| } |
| if (should_trim(m, m->topsize)) |
| sys_trim(m, 0); |
| POSTACTION(m); |
| } |
| return unfreed; |
| } |
| |
| /* Traversal */ |
| #if MALLOC_INSPECT_ALL |
| static void internal_inspect_all(mstate m, |
| void(*handler)(void *start, |
| void *end, |
| size_t used_bytes, |
| void* callback_arg), |
| void* arg) { |
| if (is_initialized(m)) { |
| mchunkptr top = m->top; |
| msegmentptr s; |
| for (s = &m->seg; s != 0; s = s->next) { |
| mchunkptr q = align_as_chunk(s->base); |
| while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { |
| mchunkptr next = next_chunk(q); |
| size_t sz = chunksize(q); |
| size_t used; |
| void* start; |
| if (is_inuse(q)) { |
| used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ |
| start = chunk2mem(q); |
| } |
| else { |
| used = 0; |
| if (is_small(sz)) { /* offset by possible bookkeeping */ |
| start = (void*)((char*)q + sizeof(struct malloc_chunk)); |
| } |
| else { |
| start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); |
| } |
| } |
| if (start < (void*)next) /* skip if all space is bookkeeping */ |
| handler(start, next, used, arg); |
| if (q == top) |
| break; |
| q = next; |
| } |
| } |
| } |
| } |
| #endif /* MALLOC_INSPECT_ALL */ |
| |
| /* ------------------ Exported realloc, memalign, etc -------------------- */ |
| |
| #if !ONLY_MSPACES |
| |
| void* dlrealloc(void* oldmem, size_t bytes) { |
| void* mem = 0; |
| if (oldmem == 0) { |
| mem = dlmalloc(bytes); |
| } |
| else if (bytes >= MAX_REQUEST) { |
| MALLOC_FAILURE_ACTION; |
| } |
| #ifdef REALLOC_ZERO_BYTES_FREES |
| else if (bytes == 0) { |
| dlfree(oldmem); |
| } |
| #endif /* REALLOC_ZERO_BYTES_FREES */ |
| else { |
| size_t nb = request2size(bytes); |
| mchunkptr oldp = mem2chunk(oldmem); |
| #if ! FOOTERS |
| mstate m = gm; |
| #else /* FOOTERS */ |
| mstate m = get_mstate_for(oldp); |
| if (!ok_magic(m)) { |
| USAGE_ERROR_ACTION(m, oldmem); |
| return 0; |
| } |
| #endif /* FOOTERS */ |
| if (!PREACTION(m)) { |
| mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); |
| POSTACTION(m); |
| if (newp != 0) { |
| check_inuse_chunk(m, newp); |
| mem = chunk2mem(newp); |
| } |
| else { |
| mem = internal_malloc(m, bytes); |
| if (mem != 0) { |
| size_t oc = chunksize(oldp) - overhead_for(oldp); |
| memcpy(mem, oldmem, (oc < bytes)? oc : bytes); |
| internal_free(m, oldmem); |
| } |
| } |
| } |
| } |
| return mem; |
| } |
| |
| void* dlrealloc_in_place(void* oldmem, size_t bytes) { |
| void* mem = 0; |
| if (oldmem != 0) { |
| if (bytes >= MAX_REQUEST) { |
| MALLOC_FAILURE_ACTION; |
| } |
| else { |
| size_t nb = request2size(bytes); |
| mchunkptr oldp = mem2chunk(oldmem); |
| #if ! FOOTERS |
| mstate m = gm; |
| #else /* FOOTERS */ |
| mstate m = get_mstate_for(oldp); |
| if (!ok_magic(m)) { |
| USAGE_ERROR_ACTION(m, oldmem); |
| return 0; |
| } |
| #endif /* FOOTERS */ |
| if (!PREACTION(m)) { |
| mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); |
| POSTACTION(m); |
| if (newp == oldp) { |
| check_inuse_chunk(m, newp); |
| mem = oldmem; |
| } |
| } |
| } |
| } |
| return mem; |
| } |
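| |
| /* |
|   Illustrative sketch (not part of the allocator itself): |
|   dlrealloc_in_place never moves a chunk, so a null return simply |
|   means the resize could not be honored in place and the original |
|   pointer remains valid with its old contents. |
| |
|   char* buf = dlmalloc(100); |
|   if (dlrealloc_in_place(buf, 400) == 0) { |
|     // could not grow in place; buf still holds the original bytes |
|   } |
|   dlfree(buf); |
| */ |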
| |
| void* dlmemalign(size_t alignment, size_t bytes) { |
| if (alignment <= MALLOC_ALIGNMENT) { |
| return dlmalloc(bytes); |
| } |
| return internal_memalign(gm, alignment, bytes); |
| } |
| |
| int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { |
| void* mem = 0; |
| if (alignment == MALLOC_ALIGNMENT) |
| mem = dlmalloc(bytes); |
| else { |
| size_t d = alignment / sizeof(void*); |
| size_t r = alignment % sizeof(void*); |
| if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) |
| return EINVAL; |
| else if (bytes <= MAX_REQUEST - alignment) { |
| if (alignment < MIN_CHUNK_SIZE) |
| alignment = MIN_CHUNK_SIZE; |
| mem = internal_memalign(gm, alignment, bytes); |
| } |
| } |
| if (mem == 0) |
| return ENOMEM; |
| else { |
| *pp = mem; |
| return 0; |
| } |
| } |
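| |
| /* |
|   Illustrative sketch (not part of the allocator itself): requesting |
|   cache-line aligned memory through the two exported alignment |
|   interfaces.  dlmemalign returns the pointer directly, while |
|   dlposix_memalign follows the POSIX convention of returning an |
|   error code (0, EINVAL or ENOMEM) and writing the pointer through |
|   an out-parameter. |
| |
|   void* p = 0; |
|   void* q = dlmemalign(64, 1024);            // 64-byte aligned, or 0 |
|   int  rc = dlposix_memalign(&p, 64, 1024);  // 0 on success |
|   if (q != 0) dlfree(q); |
|   if (rc == 0) dlfree(p); |
| */ |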
| |
| void* dlvalloc(size_t bytes) { |
| size_t pagesz; |
| ensure_initialization(); |
| pagesz = mparams.page_size; |
| return dlmemalign(pagesz, bytes); |
| } |
| |
| void* dlpvalloc(size_t bytes) { |
| size_t pagesz; |
| ensure_initialization(); |
| pagesz = mparams.page_size; |
| return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); |
| } |
| |
| void** dlindependent_calloc(size_t n_elements, size_t elem_size, |
| void* chunks[]) { |
| size_t sz = elem_size; /* serves as 1-element array */ |
| return ialloc(gm, n_elements, &sz, 3, chunks); |
| } |
| |
| void** dlindependent_comalloc(size_t n_elements, size_t sizes[], |
| void* chunks[]) { |
| return ialloc(gm, n_elements, sizes, 0, chunks); |
| } |
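| |
| /* |
|   Illustrative sketch (the make_pair helper below is hypothetical and |
|   not part of this file): using dlindependent_comalloc to carve a |
|   fixed-size header and a variable-sized body out of one underlying |
|   chunk.  The element pointers, and the returned pointer array itself |
|   (when no "chunks" array is supplied), may each be passed to dlfree |
|   independently. |
| |
|   void make_pair(size_t body_bytes) |
|   { |
|     size_t sizes[2]; |
|     void** r; |
|     sizes[0] = 64;                   // header |
|     sizes[1] = body_bytes;           // body |
|     r = dlindependent_comalloc(2, sizes, 0); |
|     if (r == 0) |
|       return;                        // allocation failed |
|     // ... use r[0] and r[1] ... |
|     dlfree(r[0]); |
|     dlfree(r[1]); |
|     dlfree(r);                       // the pointer array is malloced too |
|   } |
| */ |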
| |
| size_t dlbulk_free(void* array[], size_t nelem) { |
| return internal_bulk_free(gm, array, nelem); |
| } |
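| |
| /* |
|   Illustrative sketch (not part of the allocator itself): releasing a |
|   batch of allocations with dlbulk_free.  Freed slots in the array are |
|   set to 0, and the return value counts pointers that could not be |
|   freed (e.g. chunks from another malloc space when FOOTERS is |
|   enabled). |
| |
|   void* ptrs[8]; |
|   size_t i, unfreed; |
|   for (i = 0; i < 8; ++i) |
|     ptrs[i] = dlmalloc(32); |
|   unfreed = dlbulk_free(ptrs, 8); |
|   // on success unfreed == 0 and every ptrs[i] is now 0 |
| */ |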
| |
| #if MALLOC_INSPECT_ALL |
| void dlmalloc_inspect_all(void(*handler)(void *start, |
| void *end, |
| size_t used_bytes, |
| void* callback_arg), |
| void* arg) { |
| ensure_initialization(); |
| if (!PREACTION(gm)) { |
| internal_inspect_all(gm, handler, arg); |
| POSTACTION(gm); |
| } |
| } |
| #endif /* MALLOC_INSPECT_ALL */ |
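| |
| /* |
|   Illustrative sketch (only meaningful when MALLOC_INSPECT_ALL is |
|   defined; the count_used handler below is hypothetical): the callback |
|   passed to dlmalloc_inspect_all is invoked once per region, with |
|   used_bytes == 0 for free space. |
| |
|   static void count_used(void* start, void* end, size_t used_bytes, |
|                          void* arg) |
|   { |
|     *(size_t*)arg += used_bytes; |
|   } |
| |
|   size_t total = 0; |
|   dlmalloc_inspect_all(count_used, &total); |
| */ |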
| |
| int dlmalloc_trim(size_t pad) { |
| int result = 0; |
| ensure_initialization(); |
| if (!PREACTION(gm)) { |
| result = sys_trim(gm, pad); |
| POSTACTION(gm); |
| } |
| return result; |
| } |
| |
| size_t dlmalloc_footprint(void) { |
| return gm->footprint; |
| } |
| |
| size_t dlmalloc_max_footprint(void) { |
| return gm->max_footprint; |
| } |
| |
| size_t dlmalloc_footprint_limit(void) { |
| size_t maf = gm->footprint_limit; |
| return maf == 0 ? MAX_SIZE_T : maf; |
| } |
| |
| size_t dlmalloc_set_footprint_limit(size_t bytes) { |
| size_t result; /* invert sense of 0 */ |
| if (bytes == 0) |
| result = granularity_align(1); /* Use minimal size */ |
|   else if (bytes == MAX_SIZE_T) |
| result = 0; /* disable */ |
| else |
| result = granularity_align(bytes); |
| return gm->footprint_limit = result; |
| } |
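| |
| /* |
|   Illustrative sketch (not part of the allocator itself): the two |
|   special argument values of the footprint limit.  Passing 0 requests |
|   the smallest possible limit, MAX_SIZE_T disables limiting, and the |
|   getter reports "no limit" as MAX_SIZE_T. |
| |
|   dlmalloc_set_footprint_limit(64 << 20);    // cap near 64 MB |
|   // ... |
|   dlmalloc_set_footprint_limit(MAX_SIZE_T);  // remove the cap |
| */ |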
| |
| #if !NO_MALLINFO |
| struct dlmallinfo dlmallinfo(void) { |
| return internal_mallinfo(gm); |
| } |
| #endif /* NO_MALLINFO */ |
| |
| #if !NO_MALLOC_STATS |
| void dlmalloc_stats() { |
| internal_malloc_stats(gm); |
| } |
| #endif /* NO_MALLOC_STATS */ |
| |
| int dlmallopt(int param_number, int value) { |
| return change_mparam(param_number, value); |
| } |
| |
| size_t dlmalloc_usable_size(void* mem) { |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| if (is_inuse(p)) |
| return chunksize(p) - overhead_for(p); |
| } |
| return 0; |
| } |
| |
| #endif /* !ONLY_MSPACES */ |
| |
| /* ----------------------------- user mspaces ---------------------------- */ |
| |
| #if MSPACES |
| |
| static mstate init_user_mstate(char* tbase, size_t tsize) { |
| size_t msize = pad_request(sizeof(struct malloc_state)); |
| mchunkptr mn; |
| mchunkptr msp = align_as_chunk(tbase); |
| mstate m = (mstate)(chunk2mem(msp)); |
| memset(m, 0, msize); |
| (void)INITIAL_LOCK(&m->mutex); |
| msp->head = (msize|INUSE_BITS); |
| m->seg.base = m->least_addr = tbase; |
| m->seg.size = m->footprint = m->max_footprint = tsize; |
| m->magic = mparams.magic; |
| m->release_checks = MAX_RELEASE_CHECK_RATE; |
| m->mflags = mparams.default_mflags; |
| m->extp = 0; |
| m->exts = 0; |
| disable_contiguous(m); |
| init_bins(m); |
| mn = next_chunk(mem2chunk(m)); |
| init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); |
| check_top_chunk(m, m->top); |
| return m; |
| } |
| |
| mspace create_mspace(size_t capacity, int locked) { |
| mstate m = 0; |
| size_t msize; |
| ensure_initialization(); |
| msize = pad_request(sizeof(struct malloc_state)); |
| if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { |
| size_t rs = ((capacity == 0)? mparams.granularity : |
| (capacity + TOP_FOOT_SIZE + msize)); |
| size_t tsize = granularity_align(rs); |
| char* tbase = (char*)(CALL_MMAP(tsize)); |
| if (tbase != CMFAIL) { |
| m = init_user_mstate(tbase, tsize); |
| m->seg.sflags = USE_MMAP_BIT; |
| set_lock(m, locked); |
| } |
| } |
| return (mspace)m; |
| } |
| |
| mspace create_mspace_with_base(void* base, size_t capacity, int locked) { |
| mstate m = 0; |
| size_t msize; |
| ensure_initialization(); |
| msize = pad_request(sizeof(struct malloc_state)); |
| if (capacity > msize + TOP_FOOT_SIZE && |
| capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { |
| m = init_user_mstate((char*)base, capacity); |
| m->seg.sflags = EXTERN_BIT; |
| set_lock(m, locked); |
| } |
| return (mspace)m; |
| } |
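| |
| /* |
|   Illustrative sketch (assumes MSPACES is enabled; msp, m2 and arena |
|   are hypothetical names): a private heap backed either by mmapped |
|   memory or by a caller-supplied region.  destroy_mspace unmaps |
|   segments the mspace obtained itself, but never memory passed in via |
|   create_mspace_with_base. |
| |
|   mspace msp = create_mspace(0, 1);        // default capacity, locked |
|   void*  p   = mspace_malloc(msp, 128); |
|   mspace_free(msp, p); |
|   destroy_mspace(msp); |
| |
|   static char arena[1 << 20]; |
|   mspace m2 = create_mspace_with_base(arena, sizeof(arena), 0); |
| */ |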
| |
| int mspace_track_large_chunks(mspace msp, int enable) { |
| int ret = 0; |
| mstate ms = (mstate)msp; |
| if (!PREACTION(ms)) { |
| if (!use_mmap(ms)) { |
| ret = 1; |
| } |
| if (!enable) { |
| enable_mmap(ms); |
| } else { |
| disable_mmap(ms); |
| } |
| POSTACTION(ms); |
| } |
| return ret; |
| } |
| |
| __clib_nosanitize_addr |
| size_t destroy_mspace(mspace msp) { |
| size_t freed = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| msegmentptr sp = &ms->seg; |
| (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ |
| while (sp != 0) { |
| char* base = sp->base; |
| size_t size = sp->size; |
| flag_t flag = sp->sflags; |
| (void)base; /* placate people compiling -Wunused-variable */ |
| sp = sp->next; |
| if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && |
| CALL_MUNMAP(base, size) == 0) |
| freed += size; |
| } |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return freed; |
| } |
| |
| void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep) |
| { |
| mstate ms; |
| msegment *this_seg; |
| |
| ms = (mstate)msp; |
| this_seg = &ms->seg; |
| |
| *addrp = this_seg->base; |
| *sizep = this_seg->size; |
| } |
| |
| __clib_nosanitize_addr |
| int mspace_is_heap_object (mspace msp, void *p) |
| { |
| msegment *this_seg; |
| char *pp, *base; |
| mstate ms; |
| |
| ms = (mstate)msp; |
| |
| this_seg = &ms->seg; |
| pp = (char *) p; |
| |
| while (this_seg) |
| { |
| base = this_seg->base; |
| if (pp >= base && pp < (base + this_seg->size)) |
| return 1; |
| this_seg = this_seg->next; |
| } |
| |
| if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint) |
| return 1; |
| |
| return 0; |
| } |
| |
| __clib_nosanitize_addr |
| void *mspace_least_addr (mspace msp) |
| { |
| mstate ms = (mstate) msp; |
| return (void *) ms->least_addr; |
| } |
| |
| void mspace_disable_expand (mspace msp) |
| { |
| mstate ms = (mstate)msp; |
| |
| disable_expand (ms); |
| } |
| |
| __clib_nosanitize_addr |
| int mspace_enable_disable_trace (mspace msp, int enable) |
| { |
| mstate ms = (mstate)msp; |
| int was_enabled = 0; |
| |
| if (use_trace(ms)) |
| was_enabled = 1; |
| |
| if (enable) |
| enable_trace (ms); |
| else |
| disable_trace (ms); |
| |
| return (was_enabled); |
| } |
| |
| __clib_nosanitize_addr |
| int mspace_is_traced (mspace msp) |
| { |
| mstate ms = (mstate)msp; |
| |
| if (use_trace(ms)) |
| return 1; |
| return 0; |
| } |
| |
| __clib_nosanitize_addr |
| void* mspace_get_aligned (mspace msp, |
| unsigned long n_user_data_bytes, |
| unsigned long align, |
| unsigned long align_offset) { |
| char *rv; |
| unsigned long searchp; |
| unsigned *wwp; /* "where's Waldo" pointer */ |
| mstate ms = (mstate)msp; |
| |
| /* |
|    * Allocate space for the "Where's Waldo?" pointer, which records |
|    * the offset back to the base of the dlmalloc object. |
| */ |
| n_user_data_bytes += sizeof(unsigned); |
| align = align < MALLOC_ALIGNMENT ? MALLOC_ALIGNMENT : align; |
| |
| /* |
| * Alignment requests greater than 4K must be at offset zero, |
|    * and must be freed using mspace_put_no_offset - or never freed - |
| * since the "Where's Waldo?" pointer would waste too much space. |
| * |
| * Waldo is the address of the chunk of memory returned by mspace_malloc, |
| * which we need later to call mspace_free... |
| */ |
| if (align > 4<<10 || align_offset == ~0UL) { |
| n_user_data_bytes -= sizeof(unsigned); |
| assert(align_offset == 0); |
| rv = internal_memalign(ms, (size_t)align, n_user_data_bytes); |
| |
| /* Trace the allocation */ |
| if (rv && use_trace(ms)) { |
| mchunkptr p = mem2chunk(rv); |
| size_t psize = chunksize(p); |
| mheap_get_trace ((unsigned long)rv, psize); |
| } |
| return rv; |
| } |
| |
| align = clib_max (align, MALLOC_ALIGNMENT); |
| align = max_pow2 (align); |
| |
| /* Correct align offset to be smaller than alignment. */ |
| align_offset &= (align - 1); |
| |
| n_user_data_bytes += align; |
| rv = mspace_malloc (msp, n_user_data_bytes); |
| |
| if (rv == 0) |
| return rv; |
| |
| /* Honor the alignment request */ |
| searchp = (unsigned long)(rv + sizeof (unsigned)); |
| |
| #if 0 /* this is the idea... */ |
| while ((searchp + align_offset) % align) |
| searchp++; |
| #endif |
| |
| { |
| unsigned long where_now, delta; |
| |
| where_now = (searchp + align_offset) % align; |
| delta = align - where_now; |
| |
| searchp += delta; |
| } |
| |
| wwp = (unsigned *)(searchp - sizeof(unsigned)); |
| *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp))); |
| assert (*wwp < align); |
| |
| if (use_trace(ms)) { |
| mchunkptr p = mem2chunk(rv); |
| size_t psize = chunksize(p); |
| mheap_get_trace (searchp, psize); |
| } |
| return (void *) searchp; |
| } |
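| |
| /* |
|   Illustrative sketch (msp is assumed to be an existing mspace): the |
|   "Where's Waldo?" word stored just below the returned address lets |
|   mspace_put recover the pointer originally handed out by |
|   mspace_malloc, so callers only ever see the aligned address. |
| |
|   void* obj = mspace_get_aligned(msp, 256, 64, 0);  // 64-byte aligned |
|   size_t usable = mspace_usable_size_with_delta(obj); |
|   mspace_put(msp, obj);                 // frees via the recorded offset |
| */ |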
| |
| __clib_nosanitize_addr |
| void mspace_put (mspace msp, void *p_arg) |
| { |
| char *object_header; |
| unsigned *wwp; |
| mstate ms = (mstate)msp; |
| |
| /* Find the object header delta */ |
| wwp = (unsigned *)p_arg; |
| wwp --; |
| |
| /* Recover the dlmalloc object pointer */ |
| object_header = (char *)wwp; |
| object_header -= *wwp; |
| |
| /* Tracing (if enabled) */ |
| if (use_trace(ms)) |
| { |
| mchunkptr p = mem2chunk(object_header); |
| size_t psize = chunksize(p); |
| |
| mheap_put_trace ((unsigned long)p_arg, psize); |
| } |
| |
| #if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR) |
| /* Poison the object */ |
| { |
| size_t psize = mspace_usable_size (object_header); |
| memset (object_header, 0x13, psize); |
| } |
| #endif |
| |
| /* And free it... */ |
| mspace_free (msp, object_header); |
| } |
| |
| void mspace_put_no_offset (mspace msp, void *p_arg) |
| { |
| mstate ms = (mstate)msp; |
| |
| if (use_trace(ms)) |
| { |
| mchunkptr p = mem2chunk(p_arg); |
| size_t psize = chunksize(p); |
| |
| mheap_put_trace ((unsigned long)p_arg, psize); |
| } |
| mspace_free (msp, p_arg); |
| } |
| |
| __clib_nosanitize_addr |
| size_t mspace_usable_size_with_delta (const void *p) |
| { |
| size_t usable_size; |
| char *object_header; |
| unsigned *wwp; |
| |
| /* Find the object header delta */ |
| wwp = (unsigned *)p; |
| wwp --; |
| |
| /* Recover the dlmalloc object pointer */ |
| object_header = (char *)wwp; |
| object_header -= *wwp; |
| |
| usable_size = mspace_usable_size (object_header); |
| /* account for the offset and the size of the offset... */ |
| usable_size -= (*wwp + sizeof (*wwp)); |
| return usable_size; |
| } |
| |
| /* |
| mspace versions of routines are near-clones of the global |
| versions. This is not so nice but better than the alternatives. |
| */ |
| |
| __clib_nosanitize_addr |
| void* mspace_malloc(mspace msp, size_t bytes) { |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| return 0; |
| } |
| if (!PREACTION(ms)) { |
| void* mem; |
| size_t nb; |
| if (bytes <= MAX_SMALL_REQUEST) { |
| bindex_t idx; |
| binmap_t smallbits; |
| nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
| idx = small_index(nb); |
| smallbits = ms->smallmap >> idx; |
| |
| if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ |
| mchunkptr b, p; |
| idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
| b = smallbin_at(ms, idx); |
| p = b->fd; |
| assert(chunksize(p) == small_index2size(idx)); |
| unlink_first_small_chunk(ms, b, p, idx); |
| set_inuse_and_pinuse(ms, p, small_index2size(idx)); |
| mem = chunk2mem(p); |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| |
| else if (nb > ms->dvsize) { |
| if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ |
| mchunkptr b, p, r; |
| size_t rsize; |
| bindex_t i; |
| binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); |
| binmap_t leastbit = least_bit(leftbits); |
| compute_bit2idx(leastbit, i); |
| b = smallbin_at(ms, i); |
| p = b->fd; |
| assert(chunksize(p) == small_index2size(i)); |
| unlink_first_small_chunk(ms, b, p, i); |
| rsize = small_index2size(i) - nb; |
| /* Fit here cannot be remainderless if 4byte sizes */ |
| if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
| set_inuse_and_pinuse(ms, p, small_index2size(i)); |
| else { |
| set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
| r = chunk_plus_offset(p, nb); |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| replace_dv(ms, r, rsize); |
| } |
| mem = chunk2mem(p); |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| |
| else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| } |
| } |
| else if (bytes >= MAX_REQUEST) |
| nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
| else { |
| nb = pad_request(bytes); |
| if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| } |
| |
| if (nb <= ms->dvsize) { |
| size_t rsize = ms->dvsize - nb; |
| mchunkptr p = ms->dv; |
| if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ |
| mchunkptr r = ms->dv = chunk_plus_offset(p, nb); |
| ms->dvsize = rsize; |
| set_size_and_pinuse_of_free_chunk(r, rsize); |
| set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
| } |
| else { /* exhaust dv */ |
| size_t dvs = ms->dvsize; |
| ms->dvsize = 0; |
| ms->dv = 0; |
| set_inuse_and_pinuse(ms, p, dvs); |
| } |
| mem = chunk2mem(p); |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| |
| else if (nb < ms->topsize) { /* Split top */ |
| size_t rsize = ms->topsize -= nb; |
| mchunkptr p = ms->top; |
| mchunkptr r = ms->top = chunk_plus_offset(p, nb); |
| r->head = rsize | PINUSE_BIT; |
| set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
| mem = chunk2mem(p); |
| check_top_chunk(ms, ms->top); |
| check_malloced_chunk(ms, mem, nb); |
| goto postaction; |
| } |
| |
| mem = sys_alloc(ms, nb); |
| |
| postaction: |
| POSTACTION(ms); |
| return mem; |
| } |
| |
| return 0; |
| } |
| |
| __clib_nosanitize_addr |
| void mspace_free(mspace msp, void* mem) { |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| #if FOOTERS |
| mstate fm = get_mstate_for(p); |
| (void)msp; /* placate people compiling -Wunused */ |
| #else /* FOOTERS */ |
| mstate fm = (mstate)msp; |
| #endif /* FOOTERS */ |
| if (!ok_magic(fm)) { |
| USAGE_ERROR_ACTION(fm, p); |
| return; |
| } |
| if (!PREACTION(fm)) { |
| check_inuse_chunk(fm, p); |
| if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { |
| size_t psize = chunksize(p); |
| mchunkptr next = chunk_plus_offset(p, psize); |
| if (!pinuse(p)) { |
| size_t prevsize = p->prev_foot; |
| if (is_mmapped(p)) { |
| psize += prevsize + MMAP_FOOT_PAD; |
| if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
| fm->footprint -= psize; |
| goto postaction; |
| } |
| else { |
| mchunkptr prev = chunk_minus_offset(p, prevsize); |
| psize += prevsize; |
| p = prev; |
| if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ |
| if (p != fm->dv) { |
| unlink_chunk(fm, p, prevsize); |
| } |
| else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
| fm->dvsize = psize; |
| set_free_with_pinuse(p, psize, next); |
| goto postaction; |
| } |
| } |
| else |
| goto erroraction; |
| } |
| } |
| |
| if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { |
| if (!cinuse(next)) { /* consolidate forward */ |
| if (next == fm->top) { |
| size_t tsize = fm->topsize += psize; |
| fm->top = p; |
| p->head = tsize | PINUSE_BIT; |
| if (p == fm->dv) { |
| fm->dv = 0; |
| fm->dvsize = 0; |
| } |
| if (should_trim(fm, tsize)) |
| sys_trim(fm, 0); |
| goto postaction; |
| } |
| else if (next == fm->dv) { |
| size_t dsize = fm->dvsize += psize; |
| fm->dv = p; |
| set_size_and_pinuse_of_free_chunk(p, dsize); |
| goto postaction; |
| } |
| else { |
| size_t nsize = chunksize(next); |
| psize += nsize; |
| unlink_chunk(fm, next, nsize); |
| set_size_and_pinuse_of_free_chunk(p, psize); |
| if (p == fm->dv) { |
| fm->dvsize = psize; |
| goto postaction; |
| } |
| } |
| } |
| else |
| set_free_with_pinuse(p, psize, next); |
| |
| if (is_small(psize)) { |
| insert_small_chunk(fm, p, psize); |
| check_free_chunk(fm, p); |
| } |
| else { |
| tchunkptr tp = (tchunkptr)p; |
| insert_large_chunk(fm, tp, psize); |
| check_free_chunk(fm, p); |
| if (--fm->release_checks == 0) |
| release_unused_segments(fm); |
| } |
| goto postaction; |
| } |
| } |
| erroraction: |
| USAGE_ERROR_ACTION(fm, p); |
| postaction: |
| POSTACTION(fm); |
| } |
| } |
| } |
| |
| void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { |
| void* mem; |
| size_t req = 0; |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| return 0; |
| } |
| if (n_elements != 0) { |
| req = n_elements * elem_size; |
| if (((n_elements | elem_size) & ~(size_t)0xffff) && |
| (req / n_elements != elem_size)) |
| req = MAX_SIZE_T; /* force downstream failure on overflow */ |
| } |
| mem = internal_malloc(ms, req); |
| if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
| memset(mem, 0, req); |
| return mem; |
| } |
| |
| void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { |
| void* mem = 0; |
| if (oldmem == 0) { |
| mem = mspace_malloc(msp, bytes); |
| } |
| else if (bytes >= MAX_REQUEST) { |
| MALLOC_FAILURE_ACTION; |
| } |
| #ifdef REALLOC_ZERO_BYTES_FREES |
| else if (bytes == 0) { |
| mspace_free(msp, oldmem); |
| } |
| #endif /* REALLOC_ZERO_BYTES_FREES */ |
| else { |
| size_t nb = request2size(bytes); |
| mchunkptr oldp = mem2chunk(oldmem); |
| #if ! FOOTERS |
| mstate m = (mstate)msp; |
| #else /* FOOTERS */ |
| mstate m = get_mstate_for(oldp); |
| if (!ok_magic(m)) { |
| USAGE_ERROR_ACTION(m, oldmem); |
| return 0; |
| } |
| #endif /* FOOTERS */ |
| if (!PREACTION(m)) { |
| mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); |
| POSTACTION(m); |
| if (newp != 0) { |
| check_inuse_chunk(m, newp); |
| mem = chunk2mem(newp); |
| } |
| else { |
| mem = mspace_malloc(m, bytes); |
| if (mem != 0) { |
| size_t oc = chunksize(oldp) - overhead_for(oldp); |
| memcpy(mem, oldmem, (oc < bytes)? oc : bytes); |
| mspace_free(m, oldmem); |
| } |
| } |
| } |
| } |
| return mem; |
| } |
| |
| __clib_nosanitize_addr |
| void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { |
| void* mem = 0; |
| if (oldmem != 0) { |
| if (bytes >= MAX_REQUEST) { |
| MALLOC_FAILURE_ACTION; |
| } |
| else { |
| size_t nb = request2size(bytes); |
| mchunkptr oldp = mem2chunk(oldmem); |
| #if ! FOOTERS |
| mstate m = (mstate)msp; |
| #else /* FOOTERS */ |
| mstate m = get_mstate_for(oldp); |
| (void)msp; /* placate people compiling -Wunused */ |
| if (!ok_magic(m)) { |
| USAGE_ERROR_ACTION(m, oldmem); |
| return 0; |
| } |
| #endif /* FOOTERS */ |
| if (!PREACTION(m)) { |
| mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); |
| POSTACTION(m); |
| if (newp == oldp) { |
| check_inuse_chunk(m, newp); |
| mem = oldmem; |
| } |
| } |
| } |
| } |
| return mem; |
| } |
| |
| __clib_nosanitize_addr |
| void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| return 0; |
| } |
| if (alignment <= MALLOC_ALIGNMENT) |
| return mspace_malloc(msp, bytes); |
| return internal_memalign(ms, alignment, bytes); |
| } |
| |
| void** mspace_independent_calloc(mspace msp, size_t n_elements, |
| size_t elem_size, void* chunks[]) { |
| size_t sz = elem_size; /* serves as 1-element array */ |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| return 0; |
| } |
| return ialloc(ms, n_elements, &sz, 3, chunks); |
| } |
| |
| void** mspace_independent_comalloc(mspace msp, size_t n_elements, |
| size_t sizes[], void* chunks[]) { |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| return 0; |
| } |
| return ialloc(ms, n_elements, sizes, 0, chunks); |
| } |
| |
| size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { |
| return internal_bulk_free((mstate)msp, array, nelem); |
| } |
| |
| #if MALLOC_INSPECT_ALL |
| void mspace_inspect_all(mspace msp, |
| void(*handler)(void *start, |
| void *end, |
| size_t used_bytes, |
| void* callback_arg), |
| void* arg) { |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| if (!PREACTION(ms)) { |
| internal_inspect_all(ms, handler, arg); |
| POSTACTION(ms); |
| } |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| } |
| #endif /* MALLOC_INSPECT_ALL */ |
| |
| int mspace_trim(mspace msp, size_t pad) { |
| int result = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| if (!PREACTION(ms)) { |
| result = sys_trim(ms, pad); |
| POSTACTION(ms); |
| } |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return result; |
| } |
| |
| #if !NO_MALLOC_STATS |
| void mspace_malloc_stats(mspace msp) { |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| internal_malloc_stats(ms); |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| } |
| #endif /* NO_MALLOC_STATS */ |
| |
| size_t mspace_footprint(mspace msp) { |
| size_t result = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| result = ms->footprint; |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return result; |
| } |
| |
| size_t mspace_max_footprint(mspace msp) { |
| size_t result = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| result = ms->max_footprint; |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return result; |
| } |
| |
| size_t mspace_footprint_limit(mspace msp) { |
| size_t result = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| size_t maf = ms->footprint_limit; |
| result = (maf == 0) ? MAX_SIZE_T : maf; |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return result; |
| } |
| |
| size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { |
| size_t result = 0; |
| mstate ms = (mstate)msp; |
| if (ok_magic(ms)) { |
| if (bytes == 0) |
| result = granularity_align(1); /* Use minimal size */ |
|     else if (bytes == MAX_SIZE_T) |
| result = 0; /* disable */ |
| else |
| result = granularity_align(bytes); |
| ms->footprint_limit = result; |
| } |
| else { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return result; |
| } |
| |
| #if !NO_MALLINFO |
| __clib_nosanitize_addr |
| struct dlmallinfo mspace_mallinfo(mspace msp) { |
| mstate ms = (mstate)msp; |
| if (!ok_magic(ms)) { |
| USAGE_ERROR_ACTION(ms,ms); |
| } |
| return internal_mallinfo(ms); |
| } |
| #endif /* NO_MALLINFO */ |
| |
| __clib_nosanitize_addr |
| size_t mspace_usable_size(const void* mem) { |
| if (mem != 0) { |
| mchunkptr p = mem2chunk(mem); |
| if (is_inuse(p)) |
| return chunksize(p) - overhead_for(p); |
| } |
| return 0; |
| } |
| |
| int mspace_mallopt(int param_number, int value) { |
| return change_mparam(param_number, value); |
| } |
| |
| #endif /* MSPACES */ |
| |
| |
| /* -------------------- Alternative MORECORE functions ------------------- */ |
| |
| /* |
| Guidelines for creating a custom version of MORECORE: |
| |
| * For best performance, MORECORE should allocate in multiples of pagesize. |
| * MORECORE may allocate more memory than requested. (Or even less, |
| but this will usually result in a malloc failure.) |
| * MORECORE must not allocate memory when given argument zero, but |
| instead return one past the end address of memory from previous |
| nonzero call. |
| * For best performance, consecutive calls to MORECORE with positive |
| arguments should return increasing addresses, indicating that |
| space has been contiguously extended. |
| * Even though consecutive calls to MORECORE need not return contiguous |
| addresses, it must be OK for malloc'ed chunks to span multiple |
| regions in those cases where they do happen to be contiguous. |
| * MORECORE need not handle negative arguments -- it may instead |
| just return MFAIL when given negative arguments. |
| Negative arguments are always multiples of pagesize. MORECORE |
| must not misinterpret negative args as large positive unsigned |
| args. You can suppress all such calls from even occurring by defining |
|   MORECORE_CANNOT_TRIM. |
| |
| As an example alternative MORECORE, here is a custom allocator |
| kindly contributed for pre-OSX macOS. It uses virtually but not |
| necessarily physically contiguous non-paged memory (locked in, |
| present and won't get swapped out). You can use it by uncommenting |
| this section, adding some #includes, and setting up the appropriate |
| defines above: |
| |
| #define MORECORE osMoreCore |
| |
| There is also a shutdown routine that should somehow be called for |
| cleanup upon program exit. |
| |
| #define MAX_POOL_ENTRIES 100 |
| #define MINIMUM_MORECORE_SIZE (64 * 1024U) |
| static int next_os_pool; |
| void *our_os_pools[MAX_POOL_ENTRIES]; |
| |
| void *osMoreCore(int size) |
| { |
| void *ptr = 0; |
| static void *sbrk_top = 0; |
| |
| if (size > 0) |
| { |
| if (size < MINIMUM_MORECORE_SIZE) |
| size = MINIMUM_MORECORE_SIZE; |
| if (CurrentExecutionLevel() == kTaskLevel) |
| ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
| if (ptr == 0) |
| { |
| return (void *) MFAIL; |
| } |
| // save ptrs so they can be freed during cleanup |
| our_os_pools[next_os_pool] = ptr; |
| next_os_pool++; |
| ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); |
| sbrk_top = (char *) ptr + size; |
| return ptr; |
| } |
| else if (size < 0) |
| { |
| // we don't currently support shrink behavior |
| return (void *) MFAIL; |
| } |
| else |
| { |
| return sbrk_top; |
| } |
| } |
| |
| // cleanup any allocated memory pools |
| // called as last thing before shutting down driver |
| |
| void osCleanupMem(void) |
| { |
| void **ptr; |
| |
| for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
| if (*ptr) |
| { |
| PoolDeallocate(*ptr); |
| *ptr = 0; |
| } |
| } |
| |
| */ |
| |
| |
| /* ----------------------------------------------------------------------- |
| History: |
| v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea |
| * fix bad comparison in dlposix_memalign |
| * don't reuse adjusted asize in sys_alloc |
| * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion |
| * reduce compiler warnings -- thanks to all who reported/suggested these |
| |
| v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee) |
| * Always perform unlink checks unless INSECURE |
| * Add posix_memalign. |
| * Improve realloc to expand in more cases; expose realloc_in_place. |
| Thanks to Peter Buhr for the suggestion. |
| * Add footprint_limit, inspect_all, bulk_free. Thanks |
| to Barry Hayes and others for the suggestions. |
| * Internal refactorings to avoid calls while holding locks |
| * Use non-reentrant locks by default. Thanks to Roland McGrath |
| for the suggestion. |
| * Small fixes to mspace_destroy, reset_on_error. |
| * Various configuration extensions/changes. Thanks |
| to all who contributed these. |
| |
| V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu) |
| * Update Creative Commons URL |
| |
| V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) |
| * Use zeros instead of prev foot for is_mmapped |
| * Add mspace_track_large_chunks; thanks to Jean Brouwers |
| * Fix set_inuse in internal_realloc; thanks to Jean Brouwers |
| * Fix insufficient sys_alloc padding when using 16byte alignment |
| * Fix bad error check in mspace_footprint |
| * Adaptations for ptmalloc; thanks to Wolfram Gloger. |
| * Reentrant spin locks; thanks to Earl Chew and others |
| * Win32 improvements; thanks to Niall Douglas and Earl Chew |
| * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options |
| * Extension hook in malloc_state |
| * Various small adjustments to reduce warnings on some compilers |
| * Various configuration extensions/changes for more platforms. Thanks |
| to all who contributed these. |
| |
| V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) |
| * Add max_footprint functions |
| * Ensure all appropriate literals are size_t |
| * Fix conditional compilation problem for some #define settings |
| * Avoid concatenating segments with the one provided |
| in create_mspace_with_base |
| * Rename some variables to avoid compiler shadowing warnings |
| * Use explicit lock initialization. |
| * Better handling of sbrk interference. |
| * Simplify and fix segment insertion, trimming and mspace_destroy |
| * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x |
| * Thanks especially to Dennis Flanagan for help on these. |
| |
| V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) |
| * Fix memalign brace error. |
| |
| V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) |
| * Fix improper #endif nesting in C++ |
| * Add explicit casts needed for C++ |
| |
| V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) |
| * Use trees for large bins |
| * Support mspaces |
| * Use segments to unify sbrk-based and mmap-based system allocation, |
| removing need for emulation on most platforms without sbrk. |
| * Default safety checks |
| * Optional footer checks. Thanks to William Robertson for the idea. |
| * Internal code refactoring |
| * Incorporate suggestions and platform-specific changes. |
| Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, |
| Aaron Bachmann, Emery Berger, and others. |
| * Speed up non-fastbin processing enough to remove fastbins. |
| * Remove useless cfree() to avoid conflicts with other apps. |
| * Remove internal memcpy, memset. Compilers handle builtins better. |
| * Remove some options that no one ever used and rename others. |
| |
| V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) |
| * Fix malloc_state bitmap array misdeclaration |
| |
| V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) |
| * Allow tuning of FIRST_SORTED_BIN_SIZE |
| * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. |
| * Better detection and support for non-contiguousness of MORECORE. |
| Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger |
| * Bypass most of malloc if no frees. Thanks To Emery Berger. |
|       * Fix freeing of old top non-contiguous chunk in sysmalloc. |
| * Raised default trim and map thresholds to 256K. |
| * Fix mmap-related #defines. Thanks to Lubos Lunak. |
| * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. |
| * Branch-free bin calculation |
| * Default trim and mmap thresholds now 256K. |
| |
| V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) |
| * Introduce independent_comalloc and independent_calloc. |
| Thanks to Michael Pachos for motivation and help. |
| * Make optional .h file available |
| * Allow > 2GB requests on 32bit systems. |
| * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>. |
| Thanks also to Andreas Mueller <a.mueller at paradatec.de>, |
| and Anonymous. |
| * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for |
| helping test this.) |
| * memalign: check alignment arg |
| * realloc: don't try to shift chunks backwards, since this |
| leads to more fragmentation in some programs and doesn't |
| seem to help in any others. |
| * Collect all cases in malloc requiring system memory into sysmalloc |
| * Use mmap as backup to sbrk |
| * Place all internal state in malloc_state |
| * Introduce fastbins (although similar to 2.5.1) |
| * Many minor tunings and cosmetic improvements |
| * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK |
| * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS |
| Thanks to Tony E. Bennett <tbennett@nvidia.com> and others. |
| * Include errno.h to support default failure action. |
| |
| V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
| * return null for negative arguments |
| * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com> |
| * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
| (e.g. WIN32 platforms) |
| * Cleanup header file inclusion for WIN32 platforms |
| * Cleanup code to avoid Microsoft Visual C++ compiler complaints |
| * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
| memory allocation routines |
| * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
| * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
| usage of 'assert' in non-WIN32 code |
| * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
| avoid infinite loop |
| * Always call 'fREe()' rather than 'free()' |
| |
| V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
| * Fixed ordering problem with boundary-stamping |
| |
| V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
| * Added pvalloc, as recommended by H.J. Liu |
| * Added 64bit pointer support mainly from Wolfram Gloger |
| * Added anonymously donated WIN32 sbrk emulation |
| * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
| * malloc_extend_top: fix mask error that caused wastage after |
| foreign sbrks |
| * Add linux mremap support code from HJ Liu |
| |
| V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
| * Integrated most documentation with the code. |
| * Add support for mmap, with help from |
| Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
| * Use last_remainder in more cases. |
| * Pack bins using idea from colin@nyx10.cs.du.edu |
| * Use ordered bins instead of best-fit threshold |
| * Eliminate block-local decls to simplify tracing and debugging. |
| * Support another case of realloc via move into top |
| * Fix error occurring when initial sbrk_base not word-aligned. |
| * Rely on page size for units instead of SBRK_UNIT to |
| avoid surprises about sbrk alignment conventions. |
| * Add mallinfo, mallopt. Thanks to Raymond Nijssen |
| (raymond@es.ele.tue.nl) for the suggestion. |
| * Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
| * More precautions for cases where other routines call sbrk, |
| courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
| * Added macros etc., allowing use in linux libc from |
| H.J. Lu (hjl@gnu.ai.mit.edu) |
| * Inverted this history list |
| |
| V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
| * Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
| * Removed all preallocation code since under current scheme |
| the work required to undo bad preallocations exceeds |
| the work saved in good cases for most test programs. |
| * No longer use return list or unconsolidated bins since |
| no scheme using them consistently outperforms those that don't |
| given above changes. |
| * Use best fit for very large chunks to prevent some worst-cases. |
| * Added some support for debugging |
| |
| V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
| * Removed footers when chunks are in use. Thanks to |
| Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
| |
| V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
| * Added malloc_trim, with help from Wolfram Gloger |
| (wmglo@Dent.MED.Uni-Muenchen.DE). |
| |
| V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
| |
| V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
| * realloc: try to expand in both directions |
| * malloc: swap order of clean-bin strategy; |
| * realloc: only conditionally expand backwards |
| * Try not to scavenge used bins |
| * Use bin counts as a guide to preallocation |
| * Occasionally bin return list chunks in first scan |
| * Add a few optimizations from colin@nyx10.cs.du.edu |
| |
| V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
| * faster bin computation & slightly different binning |
| * merged all consolidations to one part of malloc proper |
| (eliminating old malloc_find_space & malloc_clean_bin) |
| * Scan 2 returns chunks (not just 1) |
| * Propagate failure in realloc if malloc returns 0 |
| * Add stuff to allow compilation on non-ANSI compilers |
| from kpv@research.att.com |
| |
| V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
| * removed potential for odd address access in prev_chunk |
| * removed dependency on getpagesize.h |
| * misc cosmetics and a bit more internal documentation |
| * anticosmetics: mangled names in macros to evade debugger strangeness |
| * tested on sparc, hp-700, dec-mips, rs6000 |
| with gcc & native cc (hp, dec only) allowing |
| Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
| |
| Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
| * Based loosely on libg++-1.2X malloc. (It retains some of the overall |
| structure of old version, but most details differ.) |
| |
| */ |