#define _COMPILING_NEWLIB
#define errno (*__errno())
extern int *__errno (void);
#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20806
#ifndef DLMALLOC_EXPORT
#define DLMALLOC_EXPORT extern
#define LACKS_FCNTL_H
#define WIN32_LEAN_AND_MEAN
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define LACKS_SCHED_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#define MMAP_CLEARS 0
#define MMAP_CLEARS 1
#if defined(DARWIN) || defined(_DARWIN)
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#define MAX_SIZE_T           (~(size_t)0)
#define USE_LOCKS  ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
                    (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#if ((defined(__GNUC__) &&                                              \
      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||      \
       defined(__i386__) || defined(__x86_64__))) ||                    \
     (defined(_MSC_VER) && _MSC_VER>=1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#error "USE_SPIN_LOCKS defined without implementation"
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#define ONLY_MSPACES 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#define ABORT  abort()
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#define MMAP_CLEARS 1
#define HAVE_MREMAP 1
#define HAVE_MREMAP 0
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#ifndef MAX_RELEASE_CHECK_RATE
#define MAX_RELEASE_CHECK_RATE 4095
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#define NO_MALLINFO 0
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#ifndef STRUCT_MALLINFO_DECLARED
#define _STRUCT_MALLINFO
#define STRUCT_MALLINFO_DECLARED 1
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;
  MALLINFO_FIELD_TYPE ordblks;
  MALLINFO_FIELD_TYPE smblks;
  MALLINFO_FIELD_TYPE hblks;
  MALLINFO_FIELD_TYPE hblkhd;
  MALLINFO_FIELD_TYPE usmblks;
  MALLINFO_FIELD_TYPE fsmblks;
  MALLINFO_FIELD_TYPE uordblks;
  MALLINFO_FIELD_TYPE fordblks;
  MALLINFO_FIELD_TYPE keepcost;
};
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#define FORCEINLINE inline

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlposix_memalign       posix_memalign
#define dlrealloc              realloc
#define dlrealloc_in_place     realloc_in_place
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all   malloc_inspect_all
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free            bulk_free

DLMALLOC_EXPORT void* dlmalloc(size_t);
DLMALLOC_EXPORT void  dlfree(void*);
DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
DLMALLOC_EXPORT void* dlvalloc(size_t);
DLMALLOC_EXPORT int dlmallopt(int, int);
DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
#if MALLOC_INSPECT_ALL
DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
                                          void* arg);
#endif /* MALLOC_INSPECT_ALL */
DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
DLMALLOC_EXPORT void* dlpvalloc(size_t);
DLMALLOC_EXPORT int dlmalloc_trim(size_t);
DLMALLOC_EXPORT void dlmalloc_stats(void);
size_t dlmalloc_usable_size(void*);
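
/*
  Illustrative usage sketch (added commentary, not part of the original
  malloc.c): how the dl-prefixed entry points declared above are typically
  called. Note that when USE_DL_PREFIX is not defined, these names are
  #defined to the standard malloc-family names. The function body below is
  an example only, under the assumption that this translation unit links
  against this allocator.
*/
#if 0   /* example only; excluded from compilation */
#include <stdio.h>
#include <string.h>

static void example_basic_usage(void) {
  /* allocate, grow, and release a buffer through the dl* API */
  char* buf = (char*)dlmalloc(64);
  if (buf == 0)
    return;
  strcpy(buf, "hello");
  buf = (char*)dlrealloc(buf, 256);          /* may move the chunk */
  if (buf != 0) {
    /* usable size is at least the requested size, often a bit more */
    printf("usable bytes: %lu\n", (unsigned long)dlmalloc_usable_size(buf));
    dlfree(buf);
  }
  dlmalloc_trim(0);                          /* release unused top space */
  printf("footprint: %lu\n", (unsigned long)dlmalloc_footprint());
}
#endif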
typedef void* mspace;

DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                                 size_t elem_size, void* chunks[]);
DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                                   size_t sizes[], void* chunks[]);
DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
DLMALLOC_EXPORT int mspace_mallopt(int, int);
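
/*
  Illustrative usage sketch (added commentary, not part of the original
  malloc.c): creating a private mspace, allocating from it, and tearing it
  down. It assumes the file is built with MSPACES enabled so the
  declarations above are compiled in; the capacity 0 and locked 1 arguments
  are just example choices.
*/
#if 0   /* example only; excluded from compilation */
static void example_mspace_usage(void) {
  mspace ms = create_mspace(0, 1);      /* 0: grow on demand; 1: use locks */
  if (ms != 0) {
    void* p = mspace_malloc(ms, 128);
    void* q = mspace_calloc(ms, 16, sizeof(int));
    mspace_free(ms, p);
    mspace_free(ms, q);
    /* destroy_mspace releases every remaining allocation in the space */
    destroy_mspace(ms);
  }
}
#endif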
#pragma warning( disable : 4146 )
#if !NO_MALLOC_STATS
#ifndef LACKS_ERRNO_H
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#ifndef LACKS_STDLIB_H
#ifndef LACKS_STRING_H
#ifndef LACKS_STRINGS_H
#include <strings.h>
#ifndef LACKS_SYS_MMAN_H
#if (defined(linux) && !defined(__USE_GNU))
#include <sys/mman.h>
#include <sys/mman.h>
#ifndef LACKS_FCNTL_H
#ifndef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#if defined (__SVR4) && defined (__sun)
#elif !defined(LACKS_SCHED_H)
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#include <pthread.h>
#elif defined(_MSC_VER)
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#ifndef LOCK_AT_FORK
#define LOCK_AT_FORK 0
#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#      define malloc_getpagesize getpagesize()
#      ifndef LACKS_SYS_PARAM_H
#        include <sys/param.h>
#      ifdef EXEC_PAGESIZE
#        define malloc_getpagesize EXEC_PAGESIZE
#          define malloc_getpagesize NBPG
#          define malloc_getpagesize (NBPG * CLSIZE)
#        define malloc_getpagesize NBPC
#          define malloc_getpagesize PAGESIZE
#          define malloc_getpagesize ((size_t)4096U)

#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL))

#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
#define MMAP_PROT            (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1;
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)

static FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#define MMAP_DEFAULT(s)             win32mmap(s)
#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#define CALL_MORECORE(S)    MORECORE(S)
#define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
#define CALL_MORECORE(S)    MFAIL
#define USE_MMAP_BIT         (SIZE_T_ONE)
#define CALL_MMAP(s)         MMAP(s)
#define CALL_MMAP(s)         MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s)    MUNMAP((a), (s))
#define CALL_MUNMAP(a, s)    MUNMAP_DEFAULT((a), (s))
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP(s)
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP_DEFAULT(s)
#define USE_MMAP_BIT         (SIZE_T_ZERO)
#define MMAP(s)              MFAIL
#define MUNMAP(a, s)         (-1)
#define DIRECT_MMAP(s)       MFAIL
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP(s)
#define CALL_MMAP(s)         MMAP(s)
#define CALL_MUNMAP(a, s)    MUNMAP((a), (s))
#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#define USE_NONCONTIGUOUS_BIT (4U)
#define EXTERN_BIT            (8U)

#define USE_LOCK_BIT               (0U)
#define INITIAL_LOCK(l)            (0)
#define DESTROY_LOCK(l)            (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#elif USE_SPIN_LOCKS
#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl)   __sync_lock_release(sl)
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
static FORCEINLINE int x86_cas_lock(int *sl) {
  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                        :
                        : "r" (val), "m" (*(sl)), "0"(cmp)

static FORCEINLINE void x86_clear_lock(int* sl) {
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        :
                        : "m" (*(sl)), "0"(prev)

#define CAS_LOCK(sl)     x86_cas_lock(sl)
#define CLEAR_LOCK(sl)   x86_clear_lock(sl)
#define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl)   interlockedexchange (sl, (LONG)0)
#define SPINS_PER_YIELD       63
#if defined(_MSC_VER)
#define SLEEP_EX_DURATION     50
#define SPIN_LOCK_YIELD  SleepEx(SLEEP_EX_DURATION, FALSE)
#elif defined (__SVR4) && defined (__sun)
#define SPIN_LOCK_YIELD   thr_yield();
#elif !defined(LACKS_SCHED_H)
#define SPIN_LOCK_YIELD   sched_yield();
#define SPIN_LOCK_YIELD
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
static int spin_acquire_lock(int *sl) {
  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
    if ((++spins & SPINS_PER_YIELD) == 0) {

#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;
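
/*
  Note (added commentary, not in the original source): the plain spin lock
  above is a single int. ACQUIRE_LOCK first tries one atomic test-and-set
  (CAS_LOCK); only on contention does it fall back to spin_acquire_lock,
  which re-reads the flag through a volatile pointer and yields the CPU via
  SPIN_LOCK_YIELD once every SPINS_PER_YIELD+1 iterations. RELEASE_LOCK is a
  single atomic clear (CLEAR_LOCK).
*/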
#define THREAD_ID_T           DWORD
#define CURRENT_THREAD        GetCurrentThreadId()
#define EQ_OWNER(X,Y)         ((X) == (Y))
#define THREAD_ID_T           pthread_t
#define CURRENT_THREAD        pthread_self()
#define EQ_OWNER(X,Y)         pthread_equal(X, Y)

struct malloc_recursive_lock {
  int sl;
  unsigned int c;
  THREAD_ID_T threadid;
};
#define MLOCK_T  struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};

static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
  CLEAR_LOCK(&lk->sl);
}

static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
    if (*((volatile int *)(&lk->sl)) == 0) {
      if (!CAS_LOCK(&lk->sl)) {
        lk->threadid = mythreadid;
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
    if ((++spins & SPINS_PER_YIELD) == 0) {

static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  if (*((volatile int *)(&lk->sl)) == 0) {
    if (!CAS_LOCK(&lk->sl)) {
      lk->threadid = mythreadid;
  else if (EQ_OWNER(lk->threadid, mythreadid)) {

#define RELEASE_LOCK(lk)      recursive_release_lock(lk)
#define TRY_LOCK(lk)          recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk)      (0)
#elif defined(WIN32)
#define MLOCK_T               CRITICAL_SECTION
#define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
#define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;

static void init_malloc_global_mutex() {
  long stat = malloc_global_mutex_status;
    interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);

#define MLOCK_T               pthread_mutex_t
#define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
#define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk)      pthread_init_lock(lk)
#define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
                                              int __kind));
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

static int pthread_init_lock (MLOCK_T *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
#endif
  if (pthread_mutex_init(lk, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}

#define USE_LOCK_BIT               (2U)
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);

typedef unsigned int bindex_t;
typedef unsigned int binmap_t;
typedef unsigned int flag_t;
#define MCHUNK_SIZE         (sizeof(mchunk))
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define flag4inuse(p)       ((p)->head & FLAG4_BIT)
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define set_flag4(p)        ((p)->head |= FLAG4_BIT)
#define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
#define calloc_must_clear(p) (!is_mmapped(p))
#define calloc_must_clear(p) (1)
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  size_t     release_checks;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     max_footprint;
  size_t     footprint_limit;
  size_t     mmap_threshold;
  size_t     trim_threshold;
  flag_t     default_mflags;
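
/*
  Worked example (added commentary, not in the original source), assuming a
  64-bit build where SIZE_T_SIZE == 8, CHUNK_OVERHEAD == 8 (FOOTERS off) and
  MALLOC_ALIGNMENT == 16, so CHUNK_ALIGN_MASK == 15:

    pad_request(20)  = (20 + 8 + 15) & ~15 = 32
    pad_request(56)  = (56 + 8 + 15) & ~15 = 64
    request2size(1)  = MIN_CHUNK_SIZE   (requests below MIN_REQUEST are
                                         rounded up to the smallest chunk)

  So a 20-byte request occupies a 32-byte chunk: 8 bytes of header overhead
  plus padding up to the 16-byte alignment unit.
*/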
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
#define is_global(M)       ((M) == &_gm_)
#define is_initialized(M)  ((M)->top != 0)
#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
#define disable_lock(M)
#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
#define disable_mmap(M)
#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))
#define mmap_align(S) granularity_align(S)
#define mmap_align(S) page_align(S)
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s)  ((s) > (M)->trim_check)
#define should_trim(M,s)  (0)
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
#define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#define PREACTION(M) (0)
#define POSTACTION(M)
#if PROCEED_ON_ERROR
int malloc_corruption_error_count;
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
#define check_malloc_state(M)       do_check_malloc_state(M)
static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      (bindex_t)((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
  unsigned int X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));
#elif defined (__INTEL_COMPILER)
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int K = _bit_scan_reverse (X); \
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    _BitScanReverse((DWORD *) &K, (DWORD) X);\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));

#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

#define idx2bit(i)              ((binmap_t)(1) << (i))
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
#define least_bit(x)         ((x) & -(x))
#define left_bits(x)         ((x<<1) | -(x<<1))
#define same_or_left_bits(x) ((x) | -(x))
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
  J = __builtin_ctz(X);
#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
  J = _bit_scan_forward (X);
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
  _BitScanForward((DWORD *) &J, X);
#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1
#define compute_bit2idx(X, I)\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);

#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
#define ok_inuse(p)     is_inuse(p)
#define ok_pinuse(p)     pinuse(p)
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_inuse(p)      (1)
#define ok_pinuse(p)     (1)
#if (FOOTERS && !INSECURE)
#define ok_magic(M)      ((M)->magic == mparams.magic)
#define ok_magic(M)      (1)
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#define RTCHECK(e)  (e)
#define RTCHECK(e)  (1)

#define mark_inuse_foot(M,p,s)
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();

  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    psize = malloc_getpagesize;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    psize = system_info.dwPageSize;
    gsize = ((DEFAULT_GRANULARITY != 0)?
             DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);

    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
        (sizeof(int) < 4)  ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
        ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
        ((psize            & (psize-SIZE_T_ONE))            != 0))
      ABORT;
    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;

    gm->mflags = mparams.default_mflags;
    (void)INITIAL_LOCK(&gm->mutex);
    pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);

    unsigned char buf[sizeof(size_t)];
    if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
        read(fd, buf, sizeof(buf)) == sizeof(buf)) {
      magic = *((size_t *) buf);
    magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
#elif defined(LACKS_TIME_H)
    magic = (size_t)&magic ^ (size_t)0x55555555U;
    magic = (size_t)(time(0) ^ (size_t)0x55555555U);
    magic |= (size_t)8U;
    magic &= ~(size_t)7U;
    (*(volatile size_t *)(&(mparams.magic))) = magic;

  RELEASE_MALLOC_GLOBAL_LOCK();
static int change_mparam(int param_number, int value) {
  size_t val;
  ensure_initialization();
  val = (value == -1)? MAX_SIZE_T : (size_t)value;
  switch(param_number) {
  case M_TRIM_THRESHOLD:
    mparams.trim_threshold = val;
    return 1;
  case M_GRANULARITY:
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
      mparams.granularity = val;
      return 1;
    }
    else
      return 0;
  case M_MMAP_THRESHOLD:
    mparams.mmap_threshold = val;
    return 1;
  default:
    return 0;
  }
}
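
/*
  Illustrative usage sketch (added commentary, not in the original source):
  tuning the three mallopt parameters handled by change_mparam above. The
  values are examples only; M_GRANULARITY must be a power of two no smaller
  than the page size, and a value of -1 maps to MAX_SIZE_T ("never").
*/
#if 0   /* example only; excluded from compilation */
static void example_tuning(void) {
  dlmallopt(M_TRIM_THRESHOLD, 1 << 20);   /* trim when top exceeds 1 MB        */
  dlmallopt(M_GRANULARITY,    1 << 17);   /* take system memory in 128 KB units */
  dlmallopt(M_MMAP_THRESHOLD, 1 << 18);   /* mmap requests of 256 KB and up     */
  dlmallopt(M_TRIM_THRESHOLD, -1);        /* disable automatic trimming         */
}
#endif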
static void do_check_any_chunk(mstate m, mchunkptr p) {
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
}

static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t  sz = p->head & ~INUSE_BITS;
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(!pinuse(chunk_plus_offset(p, sz)));
}

static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t  sz = chunksize(p);
  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}

static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  if (is_mmapped(p))
    do_check_mmapped_chunk(m, p);
}

static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      assert(next->prev_foot == sz);
      assert (next == m->top || is_inuse(next));
    }
    else
      assert(sz == SIZE_T_SIZE);
  }
}
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  mchunkptr p = mem2chunk(mem);
  size_t sz = p->head & ~INUSE_BITS;
  do_check_inuse_chunk(m, p);
  assert((sz & CHUNK_ALIGN_MASK) == 0);
  assert(sz >= MIN_CHUNK_SIZE);
  assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));

static void do_check_tree(mstate m, tchunkptr t) {
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  compute_tree_index(tsize, idx);
  assert(tsize >= MIN_LARGE_SIZE);
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    if (u->parent == 0) {
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
      assert (u->parent->child[0] == u ||
              u->parent->child[1] == u ||
              *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));

static void do_check_treebin(mstate m, bindex_t i) {
  tbinptr* tb = treebin_at(m, i);
  int empty = (m->treemap & (1U << i)) == 0;
    do_check_tree(m, t);

static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  for (; p != b; p = p->bk) {
    size_t size = chunksize(p);
    do_check_free_chunk(m, p);
    assert(small_index(size) == i);
    assert(p->bk == b || chunksize(p->bk) == chunksize(p));
    if (q->head != FENCEPOST_HEAD)
      do_check_inuse_chunk(m, q);
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      }
      while ((p = p->fd) != b);
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      tchunkptr t = *treebin_at(m, tidx);
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
          if (u == (tchunkptr)x)
        }
        while ((u = u->fd) != t);

static size_t traverse_and_check(mstate m) {
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
          do_check_inuse_chunk(m, q);
          assert(q == m->dv || bin_find(m, q));
          assert(lastq == 0 || is_inuse(lastq));
          do_check_free_chunk(m, q);
static void do_check_malloc_state(mstate m) {
  bindex_t i;
  size_t total;
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);

  if (m->dvsize != 0) {
    do_check_any_chunk(m, m->dv);
    assert(m->dvsize == chunksize(m->dv));
    assert(m->dvsize >= MIN_CHUNK_SIZE);
    assert(bin_find(m, m->dv) == 0);
  }

    do_check_top_chunk(m, m->top);
    assert(bin_find(m, m->top) == 0);

  total = traverse_and_check(m);
  assert(total <= m->footprint);
  assert(m->footprint <= m->max_footprint);
}
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE;
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      msegmentptr s = &m->seg;
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
      nm.hblkhd   = m->footprint - sum;
      nm.usmblks  = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
#if !NO_MALLOC_STATS
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      used = fp - (m->topsize + TOP_FOOT_SIZE);
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
            used -= chunksize(q);

    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    CORRUPTION_ERROR_ACTION(M);

#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
      clear_smallmap(M, I);\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      CORRUPTION_ERROR_ACTION(M);\
    CORRUPTION_ERROR_ACTION(M);

#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(chunksize(P) == small_index2size(I));\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    CORRUPTION_ERROR_ACTION(M);

#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);

#define insert_large_chunk(M, X, S) {\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    X->parent = (tchunkptr)H;\
    size_t K = S << leftshift_for_tree_index(I);\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        else if (RTCHECK(ok_address(M, C))) {\
          CORRUPTION_ERROR_ACTION(M);\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          CORRUPTION_ERROR_ACTION(M);

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
    tchunkptr F = X->fd;\
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
      CORRUPTION_ERROR_ACTION(M);\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
      if (RTCHECK(ok_address(M, RP)))\
        CORRUPTION_ERROR_ACTION(M);\
    tbinptr* H = treebin_at(M, X->index);\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
      CORRUPTION_ERROR_ACTION(M);\
    if (RTCHECK(ok_address(M, R))) {\
      if ((C0 = X->child[0]) != 0) {\
        if (RTCHECK(ok_address(M, C0))) {\
          CORRUPTION_ERROR_ACTION(M);\
      if ((C1 = X->child[1]) != 0) {\
        if (RTCHECK(ok_address(M, C1))) {\
          CORRUPTION_ERROR_ACTION(M);\
      CORRUPTION_ERROR_ACTION(M);

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }

#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
   if (m == gm) dlfree(mem); else mspace_free(m,mem);
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)

static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset;
      mark_inuse_foot(m, p, psize);
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      if (m->least_addr == 0 || mm < m->least_addr)
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
  size_t oldsize = chunksize(oldp);
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    size_t offset = oldp->prev_foot;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      mark_inuse_foot(m, newp, psize);
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      if (cp < m->least_addr)
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
static void init_top(mstate m, mchunkptr p, size_t psize) {
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  p->head = psize | PINUSE_BIT;
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold;

static void init_bins(mstate m) {
  for (i = 0; i < NSMALLBINS; ++i) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;

#if PROCEED_ON_ERROR
static void reset_on_error(mstate m) {
  ++malloc_corruption_error_count;
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    set_size_and_pinuse_of_free_chunk(q, dsize);
    if (!is_inuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;

  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;

    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char*)(&(nextp->head)) < old_end)

  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);

  check_top_chunk(m, m->top);
static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
  flag_t mmap_flag = 0;

  ensure_initialization();

  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
    void* mem = mmap_alloc(m, nb);

  asize = granularity_align(nb + SYS_ALLOC_PADDING);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + asize;
    if (fp <= m->footprint || fp > m->footprint_limit)

  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    size_t ssize = asize;
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    ACQUIRE_MALLOC_GLOBAL_LOCK();
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        if (!is_page_aligned(base))
          ssize += (page_align((size_t)base) - (size_t)base);
        fp = m->footprint + ssize;
        if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
            (m->footprint_limit == 0 ||
             (fp > m->footprint && fp <= m->footprint_limit)) &&
            (br = (char*)(CALL_MORECORE(ssize))) == base) {
      ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
      if (ssize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
    if (tbase == CMFAIL) {
        if (ssize < HALF_MAX_SIZE_T &&
            ssize < nb + SYS_ALLOC_PADDING) {
          size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
              (void) CALL_MORECORE(-ssize);
      disable_contiguous(m);
    RELEASE_MALLOC_GLOBAL_LOCK();

  if (HAVE_MMAP && tbase == CMFAIL) {
    char* mp = (char*)(CALL_MMAP(asize));
      mmap_flag = USE_MMAP_BIT;

  if (HAVE_MORECORE && tbase == CMFAIL) {
    if (asize < HALF_MAX_SIZE_T) {
      ACQUIRE_MALLOC_GLOBAL_LOCK();
        br = (char*)(CALL_MORECORE(asize));
        end = (char*)(CALL_MORECORE(0));
      RELEASE_MALLOC_GLOBAL_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {

  if (tbase != CMFAIL) {
    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;

    if (!is_initialized(m)) {
      if (m->least_addr == 0 || tbase < m->least_addr)
        m->least_addr = tbase;
      m->seg.base = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      m->release_checks = MAX_RELEASE_CHECK_RATE;
        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);

      msegmentptr sp = &m->seg;
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
          !is_extern_segment(sp) &&
          (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
          segment_holds(sp, m->top)) {
        init_top(m, m->top, m->topsize + tsize);
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        while (sp != 0 && sp->base != tbase + tsize)
          sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
            !is_extern_segment(sp) &&
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
          char* oldbase = sp->base;
          return prepend_alloc(m, tbase, oldbase, nb);
          add_segment(m, tbase, tsize, mmap_flag);

    if (nb < m->topsize) {
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      check_top_chunk(m, m->top);
      check_malloced_chunk(m, chunk2mem(p), nb);
      return chunk2mem(p);

  MALLOC_FAILURE_ACTION;
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
          unlink_large_chunk(m, tp);
        if (CALL_MUNMAP(base, size) == 0) {
          m->footprint -= size;
          insert_large_chunk(m, tp, psize);
    if (NO_SEGMENT_TRAVERSAL)
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE;

    if (m->topsize > pad) {
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
      msegmentptr sp = segment_holding(m, (char*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
              sp->size >= extra &&
              !has_segment_link(m, sp)) {
            size_t newsize = sp->size - extra;
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T)
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
          RELEASE_MALLOC_GLOBAL_LOCK();

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);

      released += release_unused_segments(m);

    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;

  return (released != 0)? 1 : 0;
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
    prev = chunk_minus_offset(p, prevsize);
    if (RTCHECK(ok_address(m, prev))) {
        unlink_chunk(m, p, prevsize);
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        set_free_with_pinuse(p, psize, next);
      CORRUPTION_ERROR_ACTION(m);
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) {
      if (next == m->top) {
        size_t tsize = m->topsize += psize;
        p->head = tsize | PINUSE_BIT;
      else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        size_t nsize = chunksize(next);
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
      set_free_with_pinuse(p, psize, next);
    insert_chunk(m, p, psize);
    CORRUPTION_ERROR_ACTION(m);
static void* tmalloc_large(mstate m, size_t nb) {
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    size_t sizebits = nb << leftshift_for_tree_index(idx);
      size_t trem = chunksize(t) - nb;
        if ((rsize = trem) == 0)
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)

  if (t == 0 && v == 0) {
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);

    size_t trem = chunksize(t) - nb;
    t = leftmost_child(t);

  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) {
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        return chunk2mem(v);
    CORRUPTION_ERROR_ACTION(m);
4531 static void* tmalloc_small(mstate m, size_t nb) {
4535 binmap_t leastbit = least_bit(m->treemap);
4536 compute_bit2idx(leastbit, i);
4537 v = t = *treebin_at(m, i);
4538 rsize = chunksize(t) - nb;
4540 while ((t = leftmost_child(t)) != 0) {
4541 size_t trem = chunksize(t) - nb;
4548 if (RTCHECK(ok_address(m, v))) {
4549 mchunkptr r = chunk_plus_offset(v, nb);
4550 assert(chunksize(v) == rsize + nb);
4551 if (RTCHECK(ok_next(v, r))) {
4552 unlink_large_chunk(m, v);
4553 if (rsize < MIN_CHUNK_SIZE)
4554 set_inuse_and_pinuse(m, v, (rsize + nb));
4556 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4557 set_size_and_pinuse_of_free_chunk(r, rsize);
4558 replace_dv(m, r, rsize);
4560 return chunk2mem(v);
4564 CORRUPTION_ERROR_ACTION(m);
4570 void* dlmalloc(size_t bytes) {
4595 ensure_initialization();
4598 if (!PREACTION(gm)) {
4601 if (bytes <= MAX_SMALL_REQUEST) {
4604 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4605 idx = small_index(nb);
4606 smallbits = gm->smallmap >> idx;
4608 if ((smallbits & 0x3U) != 0) {
4610 idx += ~smallbits & 1;
4611 b = smallbin_at(gm, idx);
4613 assert(chunksize(p) == small_index2size(idx));
4614 unlink_first_small_chunk(gm, b, p, idx);
4615 set_inuse_and_pinuse(gm, p, small_index2size(idx));
4617 check_malloced_chunk(gm, mem, nb);
4621 else if (nb > gm->dvsize) {
4622 if (smallbits != 0) {
4626 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4627 binmap_t leastbit = least_bit(leftbits);
4628 compute_bit2idx(leastbit, i);
4629 b = smallbin_at(gm, i);
4631 assert(chunksize(p) == small_index2size(i));
4632 unlink_first_small_chunk(gm, b, p, i);
4633 rsize = small_index2size(i) - nb;
4635 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4636 set_inuse_and_pinuse(gm, p, small_index2size(i));
4638 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4639 r = chunk_plus_offset(p, nb);
4640 set_size_and_pinuse_of_free_chunk(r, rsize);
4641 replace_dv(gm, r, rsize);
4644 check_malloced_chunk(gm, mem, nb);
4648 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4649 check_malloced_chunk(gm, mem, nb);
4654 else if (bytes >= MAX_REQUEST)
4657 nb = pad_request(bytes);
4658 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4659 check_malloced_chunk(gm, mem, nb);
4664 if (nb <= gm->dvsize) {
4665 size_t rsize = gm->dvsize - nb;
4666 mchunkptr p = gm->dv;
4667 if (rsize >= MIN_CHUNK_SIZE) {
4668 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4670 set_size_and_pinuse_of_free_chunk(r, rsize);
4671 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4674 size_t dvs = gm->dvsize;
4677 set_inuse_and_pinuse(gm, p, dvs);
4680 check_malloced_chunk(gm, mem, nb);
4684 else if (nb < gm->topsize) {
4685 size_t rsize = gm->topsize -= nb;
4686 mchunkptr p = gm->top;
4687 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4688 r->head = rsize | PINUSE_BIT;
4689 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4691 check_top_chunk(gm, gm->top);
4692 check_malloced_chunk(gm, mem, nb);
4696 mem = sys_alloc(gm, nb);
4708 void dlfree(void* mem) {
4716 mchunkptr p = mem2chunk(mem);
4718 mstate fm = get_mstate_for(p);
4719 if (!ok_magic(fm)) {
4720 USAGE_ERROR_ACTION(fm, p);
4726 if (!PREACTION(fm)) {
4727 check_inuse_chunk(fm, p);
4728 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4729 size_t psize = chunksize(p);
4730 mchunkptr next = chunk_plus_offset(p, psize);
4732 size_t prevsize = p->prev_foot;
4733 if (is_mmapped(p)) {
4734 psize += prevsize + MMAP_FOOT_PAD;
4735 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4736 fm->footprint -= psize;
4740 mchunkptr prev = chunk_minus_offset(p, prevsize);
4743 if (RTCHECK(ok_address(fm, prev))) {
4745 unlink_chunk(fm, p, prevsize);
4747 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4749 set_free_with_pinuse(p, psize, next);
4758 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4759 if (!cinuse(next)) {
4760 if (next == fm->top) {
4761 size_t tsize = fm->topsize += psize;
4763 p->head = tsize | PINUSE_BIT;
4768 if (should_trim(fm, tsize))
4772 else if (next == fm->dv) {
4773 size_t dsize = fm->dvsize += psize;
4775 set_size_and_pinuse_of_free_chunk(p, dsize);
4779 size_t nsize = chunksize(next);
4781 unlink_chunk(fm, next, nsize);
4782 set_size_and_pinuse_of_free_chunk(p, psize);
4790 set_free_with_pinuse(p, psize, next);
4792 if (is_small(psize)) {
4793 insert_small_chunk(fm, p, psize);
4794 check_free_chunk(fm, p);
4797 tchunkptr tp = (tchunkptr)p;
4798 insert_large_chunk(fm, tp, psize);
4799 check_free_chunk(fm, p);
4800 if (--fm->release_checks == 0)
4801 release_unused_segments(fm);
4807 USAGE_ERROR_ACTION(fm, p);
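The dlmalloc()/dlfree() pair listed above forms the core public API. A minimal usage sketch, assuming the dl-prefixed prototypes are declared (e.g. via the matching header) and the library is linked; the demo function name is illustrative only:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

extern void*  dlmalloc(size_t bytes);
extern void   dlfree(void* mem);
extern size_t dlmalloc_usable_size(void* mem);

int demo_alloc_free(void) {
  /* Small request: served from a smallbin, the designated victim, or top. */
  char* buf = (char*)dlmalloc(100);
  if (buf == NULL)
    return -1;                     /* MALLOC_FAILURE_ACTION has set errno */
  memset(buf, 0, 100);
  printf("usable bytes: %zu\n", dlmalloc_usable_size(buf));
  dlfree(buf);                     /* coalesces with free neighbors; may trigger trim/release */
  return 0;
}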
4817 void* dlcalloc(size_t n_elements, size_t elem_size) {
4820 if (n_elements != 0) {
4821 req = n_elements * elem_size;
4822 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4823 (req / n_elements != elem_size))
4826 mem = dlmalloc(req);
4827 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4828 memset(mem, 0, req);
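The guard at line 4822 only pays for a division when one operand is large enough for n_elements * elem_size to possibly wrap. A standalone restatement of that check as a sketch (the helper name is hypothetical; dlcalloc inlines this and has already rejected n_elements == 0):

#include <stddef.h>

#define SKETCH_MAX_SIZE_T (~(size_t)0)

/* Hypothetical helper mirroring dlcalloc's overflow guard.  Caller must
 * ensure n != 0, as dlcalloc does before reaching this point. */
static size_t checked_mul(size_t n, size_t size) {
  size_t req = n * size;
  if (((n | size) & ~(size_t)0xffff) &&  /* either operand >= 2^16, so wrap is possible */
      (req / n != size))                 /* confirm by dividing back */
    req = SKETCH_MAX_SIZE_T;             /* force the downstream allocation to fail */
  return req;
}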
4837 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
4840 size_t oldsize = chunksize(p);
4841 mchunkptr next = chunk_plus_offset(p, oldsize);
4842 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
4843 ok_next(p, next) && ok_pinuse(next))) {
4844 if (is_mmapped(p)) {
4845 newp = mmap_resize(m, p, nb, can_move);
4847 else if (oldsize >= nb) {
4848 size_t rsize = oldsize - nb;
4849 if (rsize >= MIN_CHUNK_SIZE) {
4850 mchunkptr r = chunk_plus_offset(p, nb);
4851 set_inuse(m, p, nb);
4852 set_inuse(m, r, rsize);
4853 dispose_chunk(m, r, rsize);
4857 else if (next == m->top) {
4858 if (oldsize + m->topsize > nb) {
4859 size_t newsize = oldsize + m->topsize;
4860 size_t newtopsize = newsize - nb;
4861 mchunkptr newtop = chunk_plus_offset(p, nb);
4862 set_inuse(m, p, nb);
4863 newtop->head = newtopsize |PINUSE_BIT;
4865 m->topsize = newtopsize;
4869 else if (next == m->dv) {
4870 size_t dvs = m->dvsize;
4871 if (oldsize + dvs >= nb) {
4872 size_t dsize = oldsize + dvs - nb;
4873 if (dsize >= MIN_CHUNK_SIZE) {
4874 mchunkptr r = chunk_plus_offset(p, nb);
4875 mchunkptr n = chunk_plus_offset(r, dsize);
4876 set_inuse(m, p, nb);
4877 set_size_and_pinuse_of_free_chunk(r, dsize);
4883 size_t newsize = oldsize + dvs;
4884 set_inuse(m, p, newsize);
4891 else if (!cinuse(next)) {
4892 size_t nextsize = chunksize(next);
4893 if (oldsize + nextsize >= nb) {
4894 size_t rsize = oldsize + nextsize - nb;
4895 unlink_chunk(m, next, nextsize);
4896 if (rsize < MIN_CHUNK_SIZE) {
4897 size_t newsize = oldsize + nextsize;
4898 set_inuse(m, p, newsize);
4901 mchunkptr r = chunk_plus_offset(p, nb);
4902 set_inuse(m, p, nb);
4903 set_inuse(m, r, rsize);
4904 dispose_chunk(m, r, rsize);
4911 USAGE_ERROR_ACTION(m, chunk2mem(p));
4916 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4918 if (alignment < MIN_CHUNK_SIZE)
4919 alignment = MIN_CHUNK_SIZE;
4920 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
4921 size_t a = MALLOC_ALIGNMENT << 1;
4922 while (a < alignment) a <<= 1;
4925 if (bytes >= MAX_REQUEST - alignment) {
4927 MALLOC_FAILURE_ACTION;
4931 size_t nb = request2size(bytes);
4932 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4933 mem = internal_malloc(m, req);
4935 mchunkptr p = mem2chunk(mem);
4938 if ((((size_t)(mem)) & (alignment - 1)) != 0) {
4947 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
4950 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
4952 mchunkptr newp = (mchunkptr)pos;
4953 size_t leadsize = pos - (char*)(p);
4954 size_t newsize = chunksize(p) - leadsize;
4956 if (is_mmapped(p)) {
4957 newp->prev_foot = p->prev_foot + leadsize;
4958 newp->head = newsize;
4961 set_inuse(m, newp, newsize);
4962 set_inuse(m, p, leadsize);
4963 dispose_chunk(m, p, leadsize);
4969 if (!is_mmapped(p)) {
4970 size_t size = chunksize(p);
4971 if (size > nb + MIN_CHUNK_SIZE) {
4972 size_t remainder_size = size - nb;
4973 mchunkptr remainder = chunk_plus_offset(p, nb);
4974 set_inuse(m, p, nb);
4975 set_inuse(m, remainder, remainder_size);
4976 dispose_chunk(m, remainder, remainder_size);
4981 assert (chunksize(p) >= nb);
4982 assert(((size_t)mem & (alignment - 1)) == 0);
4983 check_inuse_chunk(m, p);
4997 static void** ialloc(mstate m,
5003 size_t element_size;
5004 size_t contents_size;
5008 size_t remainder_size;
5010 mchunkptr array_chunk;
5015 ensure_initialization();
5018 if (n_elements == 0)
5025 if (n_elements == 0)
5026 return (void**)internal_malloc(m, 0);
5028 array_size = request2size(n_elements * (sizeof(void*)));
5033 element_size = request2size(*sizes);
5034 contents_size = n_elements * element_size;
5039 for (i = 0; i != n_elements; ++i)
5040 contents_size += request2size(sizes[i]);
5043 size = contents_size + array_size;
5050 was_enabled = use_mmap(m);
5052 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
5058 if (PREACTION(m)) return 0;
5060 remainder_size = chunksize(p);
5065 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
5070 size_t array_chunk_size;
5071 array_chunk = chunk_plus_offset(p, contents_size);
5072 array_chunk_size = remainder_size - contents_size;
5073 marray = (void**) (chunk2mem(array_chunk));
5074 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
5075 remainder_size = contents_size;
5079 for (i = 0; ; ++i) {
5080 marray[i] = chunk2mem(p);
5081 if (i != n_elements-1) {
5082 if (element_size != 0)
5083 size = element_size;
5085 size = request2size(sizes[i]);
5086 remainder_size -= size;
5087 set_size_and_pinuse_of_inuse_chunk(m, p, size);
5088 p = chunk_plus_offset(p, size);
5091 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
5097 if (marray != chunks) {
5099 if (element_size != 0) {
5100 assert(remainder_size == element_size);
5103 assert(remainder_size == request2size(sizes[i]));
5105 check_inuse_chunk(m, mem2chunk(marray));
5107 for (i = 0; i != n_elements; ++i)
5108 check_inuse_chunk(m, mem2chunk(marray[i]));
5123 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
5125 if (!PREACTION(m)) {
5127 void** fence = &(array[nelem]);
5128 for (a = array; a != fence; ++a) {
5131 mchunkptr p = mem2chunk(mem);
5132 size_t psize = chunksize(p);
5134 if (get_mstate_for(p) != m) {
5139 check_inuse_chunk(m, p);
5141 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
5143 mchunkptr next = next_chunk(p);
5144 if (b != fence && *b == chunk2mem(next)) {
5145 size_t newsize = chunksize(next) + psize;
5146 set_inuse(m, p, newsize);
5150 dispose_chunk(m, p, psize);
5153 CORRUPTION_ERROR_ACTION(m);
5158 if (should_trim(m, m->topsize))
5166 #if MALLOC_INSPECT_ALL
5167 static void internal_inspect_all(mstate m,
5168 void(*handler)(void *start,
5171 void* callback_arg),
5173 if (is_initialized(m)) {
5174 mchunkptr top = m->top;
5176 for (s = &m->seg; s != 0; s = s->next) {
5177 mchunkptr q = align_as_chunk(s->base);
5178 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
5179 mchunkptr next = next_chunk(q);
5180 size_t sz = chunksize(q);
5184 used = sz - CHUNK_OVERHEAD;
5185 start = chunk2mem(q);
5190 start = (void*)((char*)q + sizeof(struct malloc_chunk));
5196 if (start < (void*)next)
5197 handler(start, next, used, arg);
5211 void* dlrealloc(void* oldmem, size_t bytes) {
5214 mem = dlmalloc(bytes);
5216 else if (bytes >= MAX_REQUEST) {
5217 MALLOC_FAILURE_ACTION;
5219 #ifdef REALLOC_ZERO_BYTES_FREES
5220 else if (bytes == 0) {
5225 size_t nb = request2size(bytes);
5226 mchunkptr oldp = mem2chunk(oldmem);
5230 mstate m = get_mstate_for(oldp);
5232 USAGE_ERROR_ACTION(m, oldmem);
5236 if (!PREACTION(m)) {
5237 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5240 check_inuse_chunk(m, newp);
5241 mem = chunk2mem(newp);
5244 mem = internal_malloc(m, bytes);
5246 size_t oc = chunksize(oldp) - overhead_for(oldp);
5247 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5248 internal_free(m, oldmem);
5256 void* dlrealloc_in_place(void* oldmem, size_t bytes) {
5259 if (bytes >= MAX_REQUEST) {
5260 MALLOC_FAILURE_ACTION;
5263 size_t nb = request2size(bytes);
5264 mchunkptr oldp = mem2chunk(oldmem);
5268 mstate m = get_mstate_for(oldp);
5270 USAGE_ERROR_ACTION(m, oldmem);
5274 if (!PREACTION(m)) {
5275 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5278 check_inuse_chunk(m, newp);
5287 void* dlmemalign(size_t alignment, size_t bytes) {
5288 if (alignment <= MALLOC_ALIGNMENT) {
5289 return dlmalloc(bytes);
5291 return internal_memalign(gm, alignment, bytes);
5294 int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
5296 if (alignment == MALLOC_ALIGNMENT)
5297 mem = dlmalloc(bytes);
5299 size_t d = alignment / sizeof(void*);
5300 size_t r = alignment % sizeof(void*);
5301 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
5303 else if (bytes <= MAX_REQUEST - alignment) {
5304 if (alignment < MIN_CHUNK_SIZE)
5305 alignment = MIN_CHUNK_SIZE;
5306 mem = internal_memalign(gm, alignment, bytes);
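A usage sketch for the alignment entry points listed above, assuming the dl-prefixed prototypes are visible; the alignment and size values are arbitrary examples:

#include <stddef.h>

extern int   dlposix_memalign(void** pp, size_t alignment, size_t bytes);
extern void* dlmemalign(size_t alignment, size_t bytes);
extern void  dlfree(void* mem);

int demo_aligned(void) {
  void* p = NULL;
  /* alignment must be a power of two and a multiple of sizeof(void*) */
  if (dlposix_memalign(&p, 64, 1024) != 0)
    return -1;
  dlfree(p);

  /* dlmemalign falls back to plain dlmalloc when alignment <= MALLOC_ALIGNMENT */
  void* q = dlmemalign(4096, 8192);
  if (q == NULL)
    return -1;
  dlfree(q);
  return 0;
}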
5317 void* dlvalloc(size_t bytes) {
5319 ensure_initialization();
5320 pagesz = mparams.page_size;
5321 return dlmemalign(pagesz, bytes);
5324 void* dlpvalloc(size_t bytes) {
5326 ensure_initialization();
5327 pagesz = mparams.page_size;
5328 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
5331 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
5333 size_t sz = elem_size;
5334 return ialloc(gm, n_elements, &sz, 3, chunks);
5337 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
5339 return ialloc(gm, n_elements, sizes, 0, chunks);
5342 size_t dlbulk_free(void* array[], size_t nelem) {
5343 return internal_bulk_free(gm, array, nelem);
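The comalloc/bulk_free pair above is meant to be used together: one underlying allocation is carved into adjacent chunks, and the whole set can be released in a single pass. A sketch, assuming the prototypes are visible (the element sizes are arbitrary):

#include <stddef.h>

extern void** dlindependent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
extern size_t dlbulk_free(void* array[], size_t nelem);
extern void   dlfree(void* mem);

int demo_comalloc(void) {
  size_t sizes[3] = { 24, 128, 4000 };
  /* One underlying allocation, carved into three adjacent chunks. */
  void** parts = dlindependent_comalloc(3, sizes, NULL);
  if (parts == NULL)
    return -1;
  /* ... use parts[0], parts[1], parts[2] ... */
  dlbulk_free(parts, 3);  /* frees (and clears) the three element pointers */
  dlfree(parts);          /* the pointer array itself was also heap-allocated */
  return 0;
}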
5346 #if MALLOC_INSPECT_ALL
5347 void dlmalloc_inspect_all(void(*handler)(void *start,
5350 void* callback_arg),
5352 ensure_initialization();
5353 if (!PREACTION(gm)) {
5354 internal_inspect_all(gm, handler, arg);
5360 int dlmalloc_trim(size_t pad) {
5362 ensure_initialization();
5363 if (!PREACTION(gm)) {
5364 result = sys_trim(gm, pad);
5370 size_t dlmalloc_footprint(void) {
5371 return gm->footprint;
5374 size_t dlmalloc_max_footprint(void) {
5375 return gm->max_footprint;
5378 size_t dlmalloc_footprint_limit(void) {
5379 size_t maf = gm->footprint_limit;
5380 return maf == 0 ? MAX_SIZE_T : maf;
5383 size_t dlmalloc_set_footprint_limit(size_t bytes) {
5386 result = granularity_align(1);
5387 if (bytes == MAX_SIZE_T)
5390 result = granularity_align(bytes);
5391 return gm->footprint_limit = result;
5396 return internal_mallinfo(gm);
5400 #if !NO_MALLOC_STATS
5401 void dlmalloc_stats() {
5402 internal_malloc_stats(gm);
5406 int dlmallopt(int param_number, int value) {
5407 return change_mparam(param_number, value);
5410 size_t dlmalloc_usable_size(void* mem) {
5412 mchunkptr p = mem2chunk(mem);
5414 return chunksize(p) - overhead_for(p);
5425 static mstate init_user_mstate(char* tbase, size_t tsize) {
5426 size_t msize = pad_request(sizeof(struct malloc_state));
5428 mchunkptr msp = align_as_chunk(tbase);
5429 mstate m = (mstate)(chunk2mem(msp));
5430 memset(m, 0, msize);
5431 (void)INITIAL_LOCK(&m->mutex);
5432 msp->head = (msize|INUSE_BITS);
5433 m->seg.base = m->least_addr = tbase;
5434 m->seg.size = m->footprint = m->max_footprint = tsize;
5435 m->magic = mparams.magic;
5436 m->release_checks = MAX_RELEASE_CHECK_RATE;
5437 m->mflags = mparams.default_mflags;
5440 disable_contiguous(m);
5442 mn = next_chunk(mem2chunk(m));
5443 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
5444 check_top_chunk(m, m->top);
5448 mspace create_mspace(size_t capacity, int locked) {
5451 ensure_initialization();
5453 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5454 size_t rs = ((capacity == 0)? mparams.granularity :
5455 (capacity + TOP_FOOT_SIZE + msize));
5456 size_t tsize = granularity_align(rs);
5457 char* tbase = (char*)(CALL_MMAP(tsize));
5458 if (tbase != CMFAIL) {
5459 m = init_user_mstate(tbase, tsize);
5460 m->seg.sflags = USE_MMAP_BIT;
5461 set_lock(m, locked);
5467 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
5470 ensure_initialization();
5472 if (capacity > msize + TOP_FOOT_SIZE &&
5473 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5474 m = init_user_mstate((char*)base, capacity);
5475 m->seg.sflags = EXTERN_BIT;
5476 set_lock(m, locked);
5481 int mspace_track_large_chunks(mspace msp, int enable) {
5483 mstate ms = (mstate)msp;
5484 if (!PREACTION(ms)) {
5485 if (!use_mmap(ms)) {
5498 size_t destroy_mspace(mspace msp) {
5500 mstate ms = (mstate)msp;
5502 msegmentptr sp = &ms->seg;
5503 (void)DESTROY_LOCK(&ms->mutex);
5505 char* base = sp->base;
5506 size_t size = sp->size;
5507 flag_t flag = sp->sflags;
5510 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
5511 CALL_MUNMAP(base, size) == 0)
5516 USAGE_ERROR_ACTION(ms,ms);
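When built with MSPACES, the functions above give each client an isolated heap. A lifecycle sketch, assuming the mspace prototypes are visible; capacity and sizes are arbitrary:

#include <stddef.h>

typedef void* mspace;
extern mspace create_mspace(size_t capacity, int locked);
extern void*  mspace_malloc(mspace msp, size_t bytes);
extern void   mspace_free(mspace msp, void* mem);
extern size_t destroy_mspace(mspace msp);

int demo_mspace(void) {
  /* capacity 0: start at the default granularity and grow on demand */
  mspace ms = create_mspace(0, 1 /* use a lock */);
  if (ms == NULL)
    return -1;
  void* p = mspace_malloc(ms, 256);
  if (p != NULL)
    mspace_free(ms, p);
  /* destroy_mspace unmaps every segment; any remaining chunks go with it */
  destroy_mspace(ms);
  return 0;
}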
5526 void* mspace_malloc(mspace msp, size_t bytes) {
5527 mstate ms = (mstate)msp;
5528 if (!ok_magic(ms)) {
5529 USAGE_ERROR_ACTION(ms,ms);
5532 if (!PREACTION(ms)) {
5535 if (bytes <= MAX_SMALL_REQUEST) {
5538 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
5539 idx = small_index(nb);
5540 smallbits = ms->smallmap >> idx;
5542 if ((smallbits & 0x3U) != 0) {
5544 idx += ~smallbits & 1;
5545 b = smallbin_at(ms, idx);
5547 assert(chunksize(p) == small_index2size(idx));
5548 unlink_first_small_chunk(ms, b, p, idx);
5549 set_inuse_and_pinuse(ms, p, small_index2size(idx));
5551 check_malloced_chunk(ms, mem, nb);
5555 else if (nb > ms->dvsize) {
5556 if (smallbits != 0) {
5560 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
5561 binmap_t leastbit = least_bit(leftbits);
5562 compute_bit2idx(leastbit, i);
5563 b = smallbin_at(ms, i);
5565 assert(chunksize(p) == small_index2size(i));
5566 unlink_first_small_chunk(ms, b, p, i);
5567 rsize = small_index2size(i) - nb;
5569 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
5570 set_inuse_and_pinuse(ms, p, small_index2size(i));
5572 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5573 r = chunk_plus_offset(p, nb);
5574 set_size_and_pinuse_of_free_chunk(r, rsize);
5575 replace_dv(ms, r, rsize);
5578 check_malloced_chunk(ms, mem, nb);
5582 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
5583 check_malloced_chunk(ms, mem, nb);
5588 else if (bytes >= MAX_REQUEST)
5591 nb = pad_request(bytes);
5592 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
5593 check_malloced_chunk(ms, mem, nb);
5598 if (nb <= ms->dvsize) {
5599 size_t rsize = ms->dvsize - nb;
5600 mchunkptr p = ms->dv;
5601 if (rsize >= MIN_CHUNK_SIZE) {
5602 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
5604 set_size_and_pinuse_of_free_chunk(r, rsize);
5605 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5608 size_t dvs = ms->dvsize;
5611 set_inuse_and_pinuse(ms, p, dvs);
5614 check_malloced_chunk(ms, mem, nb);
5618 else if (nb < ms->topsize) {
5619 size_t rsize = ms->topsize -= nb;
5620 mchunkptr p = ms->top;
5621 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
5622 r->head = rsize | PINUSE_BIT;
5623 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5625 check_top_chunk(ms, ms->top);
5626 check_malloced_chunk(ms, mem, nb);
5630 mem = sys_alloc(ms, nb);
5640 void mspace_free(mspace msp, void* mem) {
5642 mchunkptr p = mem2chunk(mem);
5644 mstate fm = get_mstate_for(p);
5647 mstate fm = (mstate)msp;
5649 if (!ok_magic(fm)) {
5650 USAGE_ERROR_ACTION(fm, p);
5653 if (!PREACTION(fm)) {
5654 check_inuse_chunk(fm, p);
5655 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
5656 size_t psize = chunksize(p);
5657 mchunkptr next = chunk_plus_offset(p, psize);
5659 size_t prevsize = p->prev_foot;
5660 if (is_mmapped(p)) {
5661 psize += prevsize + MMAP_FOOT_PAD;
5662 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
5663 fm->footprint -= psize;
5667 mchunkptr prev = chunk_minus_offset(p, prevsize);
5670 if (RTCHECK(ok_address(fm, prev))) {
5672 unlink_chunk(fm, p, prevsize);
5674 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
5676 set_free_with_pinuse(p, psize, next);
5685 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
5686 if (!cinuse(next)) {
5687 if (next == fm->top) {
5688 size_t tsize = fm->topsize += psize;
5690 p->head = tsize | PINUSE_BIT;
5695 if (should_trim(fm, tsize))
5699 else if (next == fm->dv) {
5700 size_t dsize = fm->dvsize += psize;
5702 set_size_and_pinuse_of_free_chunk(p, dsize);
5706 size_t nsize = chunksize(next);
5708 unlink_chunk(fm, next, nsize);
5709 set_size_and_pinuse_of_free_chunk(p, psize);
5717 set_free_with_pinuse(p, psize, next);
5719 if (is_small(psize)) {
5720 insert_small_chunk(fm, p, psize);
5721 check_free_chunk(fm, p);
5724 tchunkptr tp = (tchunkptr)p;
5725 insert_large_chunk(fm, tp, psize);
5726 check_free_chunk(fm, p);
5727 if (--fm->release_checks == 0)
5728 release_unused_segments(fm);
5734 USAGE_ERROR_ACTION(fm, p);
5741 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5744 mstate ms = (mstate)msp;
5745 if (!ok_magic(ms)) {
5746 USAGE_ERROR_ACTION(ms,ms);
5749 if (n_elements != 0) {
5750 req = n_elements * elem_size;
5751 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5752 (req / n_elements != elem_size))
5755 mem = internal_malloc(ms, req);
5756 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
5757 memset(mem, 0, req);
5761 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5764 mem = mspace_malloc(msp, bytes);
5766 else if (bytes >= MAX_REQUEST) {
5767 MALLOC_FAILURE_ACTION;
5769 #ifdef REALLOC_ZERO_BYTES_FREES
5770 else if (bytes == 0) {
5771 mspace_free(msp, oldmem);
5775 size_t nb = request2size(bytes);
5776 mchunkptr oldp = mem2chunk(oldmem);
5778 mstate m = (mstate)msp;
5780 mstate m = get_mstate_for(oldp);
5782 USAGE_ERROR_ACTION(m, oldmem);
5786 if (!PREACTION(m)) {
5787 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5790 check_inuse_chunk(m, newp);
5791 mem = chunk2mem(newp);
5794 mem = mspace_malloc(m, bytes);
5796 size_t oc = chunksize(oldp) - overhead_for(oldp);
5797 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5798 mspace_free(m, oldmem);
5806 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
5809 if (bytes >= MAX_REQUEST) {
5810 MALLOC_FAILURE_ACTION;
5813 size_t nb = request2size(bytes);
5814 mchunkptr oldp = mem2chunk(oldmem);
5816 mstate m = (mstate)msp;
5818 mstate m = get_mstate_for(oldp);
5821 USAGE_ERROR_ACTION(m, oldmem);
5825 if (!PREACTION(m)) {
5826 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5829 check_inuse_chunk(m, newp);
5838 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5839 mstate ms = (mstate)msp;
5840 if (!ok_magic(ms)) {
5841 USAGE_ERROR_ACTION(ms,ms);
5844 if (alignment <= MALLOC_ALIGNMENT)
5845 return mspace_malloc(msp, bytes);
5846 return internal_memalign(ms, alignment, bytes);
5849 void** mspace_independent_calloc(mspace msp, size_t n_elements,
5850 size_t elem_size, void* chunks[]) {
5851 size_t sz = elem_size;
5852 mstate ms = (mstate)msp;
5853 if (!ok_magic(ms)) {
5854 USAGE_ERROR_ACTION(ms,ms);
5857 return ialloc(ms, n_elements, &sz, 3, chunks);
5860 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
5861 size_t sizes[], void* chunks[]) {
5862 mstate ms = (mstate)msp;
5863 if (!ok_magic(ms)) {
5864 USAGE_ERROR_ACTION(ms,ms);
5867 return ialloc(ms, n_elements, sizes, 0, chunks);
5870 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
5871 return internal_bulk_free((mstate)msp, array, nelem);
5874 #if MALLOC_INSPECT_ALL
5875 void mspace_inspect_all(mspace msp,
5876 void(*handler)(void *start,
5879 void* callback_arg),
5881 mstate ms = (mstate)msp;
5883 if (!PREACTION(ms)) {
5884 internal_inspect_all(ms, handler, arg);
5889 USAGE_ERROR_ACTION(ms,ms);
5894 int mspace_trim(mspace msp, size_t pad) {
5896 mstate ms = (mstate)msp;
5898 if (!PREACTION(ms)) {
5899 result = sys_trim(ms, pad);
5904 USAGE_ERROR_ACTION(ms,ms);
5909 #if !NO_MALLOC_STATS
5910 void mspace_malloc_stats(mspace msp) {
5911 mstate ms = (mstate)msp;
5913 internal_malloc_stats(ms);
5916 USAGE_ERROR_ACTION(ms,ms);
5921 size_t mspace_footprint(mspace msp) {
5923 mstate ms = (mstate)msp;
5925 result = ms->footprint;
5928 USAGE_ERROR_ACTION(ms,ms);
5933 size_t mspace_max_footprint(mspace msp) {
5935 mstate ms = (mstate)msp;
5937 result = ms->max_footprint;
5940 USAGE_ERROR_ACTION(ms,ms);
5945 size_t mspace_footprint_limit(mspace msp) {
5947 mstate ms = (mstate)msp;
5949 size_t maf = ms->footprint_limit;
5950 result = (maf == 0) ? MAX_SIZE_T : maf;
5953 USAGE_ERROR_ACTION(ms,ms);
5958 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
5960 mstate ms = (mstate)msp;
5963 result = granularity_align(1);
5964 if (bytes == MAX_SIZE_T)
5967 result = granularity_align(bytes);
5968 ms->footprint_limit = result;
5971 USAGE_ERROR_ACTION(ms,ms);
5977 struct mallinfo mspace_mallinfo(mspace msp) {
5978 mstate ms = (mstate)msp;
5979 if (!ok_magic(ms)) {
5980 USAGE_ERROR_ACTION(ms,ms);
5982 return internal_mallinfo(ms);
5986 size_t mspace_usable_size(const void* mem) {
5988 mchunkptr p = mem2chunk(mem);
5990 return chunksize(p) - overhead_for(p);
5995 int mspace_mallopt(int param_number, int value) {
5996 return change_mparam(param_number, value);
EXPORTED_PUBLIC void * page_align(void *p) PURE