Lines Matching +full:minsize +full:- +full:linux
8 /* ---------- To make a malloc.h, start cutting here ------------ */
28 This is not the fastest, most space-conserving, most portable, or
30 while also being among the most space-conserving, portable and tunable.
31 Consistent balance across these factors results in a good general-purpose
32 allocator. For a high-level description, see
50 size argument of zero (re)allocates a minimum-sized chunk.
60 Equivalent to valloc(minimum-page-that-holds(n)), that is,
68 Release all but pad bytes of freed top-most memory back
84 Alignment: 8-byte
89 Code for 8-byte pointers is untested by me but has worked
100 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
101 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
112 Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
113 8-byte size_t: 2^63 - 16 bytes
121 minimum-sized chunk.
126 make the normal worst-case wastage 15 bytes (i.e., up to 15
129 1. Because requests for zero bytes allocate non-zero space,
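The sizing rules quoted above condense into a small helper. This is an
illustrative sketch assuming 4-byte size_t/SIZE_SZ and 8-byte alignment,
not the file's own request2size macro (which appears truncated in this
listing):

/* Sketch of the sizing rules above: add one size word of overhead,
   round up to an 8-byte multiple, and enforce the MINSIZE floor. */
static unsigned long demo_request2size(unsigned long req)
{
    unsigned long nb = (req + 4 + 7) & ~7UL;  /* + SIZE_SZ, round to 8 */
    return (nb < 16) ? 16 : nb;               /* MINSIZE floor */
}
/* demo_request2size(0) == 16 and demo_request2size(1) == 16: a 1-byte
   request thus occupies 15 more bytes than asked for, the worst-case
   wastage quoted above. */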
139 * No user-definable hooks for callbacks and the like.
144 * Synopsis of compile-time options:
149 Linux. It is also reported to work on WIN32 platforms.
151 stand-alone embedded systems.
153 The implementation is in straight, hand-tuned ANSI C. Among other
156 (for example gcc -O2) that can simplify expressions and control
160 Nonzero if using ANSI-standard C compiler, a C++ compiler, or
163 Define to enable debugging. Adds fairly extensive assertion-based
194 Define to non-zero to optionally make malloc() use mmap() to
196 HAVE_MREMAP (default: defined as 0 unless Linux libc set)
197 Define to non-zero to optionally make realloc() use mremap() to
206 Define to a 32-bit type (probably `unsigned int') if you are on a
207 64-bit machine, yet do not want or need to allow malloc requests of
211 Defined only when compiled as part of Linux libc.
212 Also note that there is some odd internal name-mangling via defines
225 MORECORE_FAILURE (default: -1)
249 #pragma GCC diagnostic ignored "-Wpragmas"
250 #pragma GCC diagnostic ignored "-Wunknown-warning-option"
251 #pragma GCC diagnostic ignored "-Wanalyzer-malloc-leak"
252 #pragma GCC diagnostic ignored "-Wanalyzer-use-of-uninitialized-value"
253 #pragma GCC diagnostic ignored "-Wanalyzer-out-of-bounds"
254 #pragma GCC diagnostic ignored "-Warray-bounds"
255 #pragma GCC diagnostic ignored "-Wanalyzer-null-dereference"
270 Compile-time options
295 If you compile with -DDEBUG, a number of assertion checks are
301 attempt to check every non-mmapped allocated and free chunk in the
344 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
345 of chunk sizes. On a 64-bit machine, you can reduce malloc
367 (long)(x - y) where x = 0 and y = 1 ends up being 0x00000000FFFFFFFF
368 which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF. This is due to the
374 ? -(long) (y - x) \
375 : (long) (x - y))
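The two fragments above are the arms of a signed-subtraction helper. A
reconstruction consistent with them (hedged: the file's version may also
guard on sizeof(long) > sizeof(INTERNAL_SIZE_T)):

/* Subtract two unsigned sizes, yielding a correctly signed long even
   when INTERNAL_SIZE_T is narrower than long: a plain (long)(x - y)
   with 32-bit unsigned operands, x = 0, y = 1 gives 2^32 - 1, not -1. */
#define long_sub_size_t(x, y) \
  ((x) < (y) ? -(long)((y) - (x)) : (long)((x) - (y)))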
401 /* The following macros are only invoked with (2n+1)-multiples of
448 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
457 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
466 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
475 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
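The MALLOC_ZERO and MALLOC_COPY fragments above come from Duff's-device
style unrolled loops. A minimal self-contained rendering of the copy
pattern (illustrative; the file's macros also special-case runs shorter
than eight words):

#include <stddef.h>

/* Copy nwords words, eight per loop iteration, entering the switch at
   the remainder so no separate cleanup loop is needed. */
static void demo_word_copy(size_t *dst, const size_t *src, size_t nwords)
{
    size_t n = (nwords + 7) / 8;
    if (nwords == 0) return;
    switch (nwords % 8) {
    case 0: do { *dst++ = *src++;
    case 7:      *dst++ = *src++;
    case 6:      *dst++ = *src++;
    case 5:      *dst++ = *src++;
    case 4:      *dst++ = *src++;
    case 3:      *dst++ = *src++;
    case 2:      *dst++ = *src++;
    case 1:      *dst++ = *src++;
            } while (--n > 0);
    }
}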
493 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
494 large blocks. This is currently only possible on Linux with
519 manages memory from the system in page-size units.
578 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
589 #define M_TRIM_THRESHOLD -1
590 #define M_TOP_PAD -2
591 #define M_MMAP_THRESHOLD -3
592 #define M_MMAP_MAX -4
601 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
604 Automatic trimming is mainly useful in long-lived programs.
615 system-level demands of a long-lived program down to a bare
617 the XF86 X server on Linux, using a trim threshold of 128K and a
618 mmap threshold of 192K led to near-minimal long term resource
621 If you are using this malloc in a long-lived program, it should
630 chunks at all. And in well-behaved long-lived programs,
635 protection against the system-level effects of carrying around
644 disable trimming completely, you can set to (unsigned long)(-1);
670 that nearly every malloc request during program start-up (or
674 Automatic rounding-up to page-size units is normally sufficient
691 be allocated using already-existing space will be serviced via mmap.
702 helps keep the system level memory demands of a long-lived
744 better off using normal sbrk-based allocation routines that
750 the default value is 0, and attempts to set it to non-zero values
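Taken together, the four parameters can be tuned in one place. A sketch
using the parameter numbers defined above; the 128K/192K figures are the
long-lived-program example quoted earlier, the rest are illustrative:

#include <malloc.h>

void demo_tune(void)
{
    mallopt(M_TRIM_THRESHOLD, 128 * 1024); /* trim top when >128K unused   */
    mallopt(M_MMAP_THRESHOLD, 192 * 1024); /* mmap requests of 192K and up */
    mallopt(M_TOP_PAD,        0);          /* no extra padding on sbrk     */
    mallopt(M_MMAP_MAX,       64);         /* cap simultaneous mmap regions */
}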
759 Special defines for linux libc
761 Except when compiled using these special defines for Linux libc
767 essentially the effect of the linux weak alias approach). It would
778 #define MORECORE_FAILURE -1
839 /* ---------- To make a malloc.h, end cutting here ------------ */
860 struct malloc_chunk* fd; /* double links -- used only if free. */
884 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
886 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
888 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
893 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
895 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
904 thus double-word aligned.
906 Free chunks are stored in circular doubly-linked lists, and look like this:
908 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
910 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
912 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
914 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
916 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
920 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
922 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
924 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
925 chunk size (which is always a multiple of two words), is an in-use
930 preventing access to non-existent (or non-owned) memory.)
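Concretely, the P bit borrows the low-order bit that an 8-byte-multiple
size can never use. Illustrative arithmetic (not from the file):

/* size field = 48 | PREV_INUSE   ->  0x31
   chunksize  = 0x31 & ~SIZE_BITS ->  48   (flag bits masked off)
   prev_inuse = 0x31 & PREV_INUSE ->  1    (previous chunk is in use) */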
942 become less than MINSIZE bytes long, it is replenished via
945 2. Chunks allocated via mmap, which have the second-lowest-order
963 the same-sized chunks, but facilitates best-fit allocation for
973 * `top': The top-most available chunk (i.e., the one bordering the
980 most recently split (non-top) chunk. This bin is checked
981 before other non-fitting chunks, so as to provide better
1004 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
1005 #define MINSIZE (sizeof(struct malloc_chunk))
1010 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1016 …(unsigned long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MA…
1046 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1051 ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1068 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1072 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1076 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1081 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1084 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1089 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1092 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1095 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1106 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1110 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1114 #define set_head(p, s) ((p)->size = (s))
1118 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
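Taken together, these macros let code walk chunks by header arithmetic
alone. A hypothetical sketch (demo_walk is not from the file):

#include <stdio.h>

/* Stride forward via each chunk's size field, reading its in-use
   status from the P bit of its successor's header. */
static void demo_walk(mchunkptr p, mchunkptr limit)
{
    while (p < limit)
    {
        printf("%p: %lu bytes, %s\n", (void*)p,
               (unsigned long)chunksize(p),
               inuse(p) ? "in use" : "free");
        p = next_chunk(p);
    }
}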
1128 heads of (initially empty) doubly-linked lists of chunks, laid out
1168 #define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
1170 #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1178 #define top (bin_at(0)->fd) /* The topmost chunk */
1221 /* field-extraction macros */
1223 #define first(b) ((b)->fd)
1224 #define last(b) ((b)->bk)
1246 #define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
1254 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
1259 To help compensate for the large number of bins, a one-level index
1260 structure is used for bin-by-bin searching. `binblocks' is a
1261 one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1262 have any (possibly) non-empty bins, so they can be skipped over
1270 #define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */
1272 /* bin<->block macros */
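The skip-scan that binblocks enables can be shown in isolation. A
self-contained sketch with illustrative names standing in for the file's
bin<->block macros:

/* Bit k of `blocks' covers bins [k*WIDTH, (k+1)*WIDTH); a clear bit
   lets the search jump a whole group of bins at once. */
enum { WIDTH = 4 };   /* stands in for BINBLOCKWIDTH */

static int demo_next_candidate(unsigned blocks, int idx, int nbins)
{
    while (idx < nbins)
    {
        if (blocks & (1u << (idx / WIDTH)))
            return idx;                        /* group may be non-empty */
        idx = (idx & ~(WIDTH - 1)) + WIDTH;    /* skip the whole group */
    }
    return -1;                                 /* nothing left to scan */
}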
1311 STATIC char* sbrk_base = (char*)(-1);
1376 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; in do_check_chunk()
1393 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; in do_check_free_chunk()
1402 if ((long)sz >= (long)MINSIZE) in do_check_free_chunk()
1407 assert(next->prev_size == sz); in do_check_free_chunk()
1413 assert(p->fd->bk == p); in do_check_free_chunk()
1414 assert(p->bk->fd == p); in do_check_free_chunk()
1441 assert(chunksize(next) >= MINSIZE); in do_check_inuse_chunk()
1450 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; in do_check_malloced_chunk()
1456 assert((long)sz >= (long)MINSIZE); in do_check_malloced_chunk()
1459 assert(room < (long)MINSIZE); in do_check_malloced_chunk()
1485 Macro-based internal utilities
1507 FD = BK->fd; \
1508 P->bk = BK; \
1509 P->fd = FD; \
1510 FD->bk = BK->fd = P; \
1516 FD = BK->fd; \
1520 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
1521 BK = FD->bk; \
1523 P->bk = BK; \
1524 P->fd = FD; \
1525 FD->bk = BK->fd = P; \
1534 BK = P->bk; \
1535 FD = P->fd; \
1536 FD->bk = BK; \
1537 BK->fd = FD; \
1544 last_remainder->fd = last_remainder->bk = P; \
1545 P->fd = P->bk = last_remainder; \
1551 (last_remainder->fd = last_remainder->bk = last_remainder)
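Restated as a plain function, the UNLINK fragments above are the
standard circular-list splice (sketch only):

static void demo_unlink(mchunkptr p)
{
    mchunkptr bk = p->bk;
    mchunkptr fd = p->fd;
    fd->bk = bk;   /* successor now points back past p */
    bk->fd = fd;   /* predecessor now points forward past p */
}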
1566 size_t page_mask = malloc_getpagesize - 1; in mmap_chunk()
1570 static int fd = -1; in mmap_chunk()
1582 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); in mmap_chunk()
1592 if(p == (mchunkptr)-1) return 0; in mmap_chunk()
1597 /* We demand that eight bytes into a page must be 8-byte aligned. */ in mmap_chunk()
1604 p->prev_size = 0; in mmap_chunk()
1631 assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); in munmap_chunk()
1633 n_mmaps--; in munmap_chunk()
1634 mmapped_mem -= (size + p->prev_size); in munmap_chunk()
1636 ret = munmap((char *)p - p->prev_size, size + p->prev_size); in munmap_chunk()
1638 /* munmap returns non-zero on failure */ in munmap_chunk()
1654 size_t page_mask = malloc_getpagesize - 1; in mremap_chunk()
1655 INTERNAL_SIZE_T offset = p->prev_size; in mremap_chunk()
1662 assert(((size + offset) & (malloc_getpagesize-1)) == 0); in mremap_chunk()
1667 cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); in mremap_chunk()
1669 if (cp == (char *)-1) return 0; in mremap_chunk()
1675 assert((p->prev_size == offset)); in mremap_chunk()
1676 set_head(p, (new_size - offset)|IS_MMAPPED); in mremap_chunk()
1678 mmapped_mem -= size + offset; in mremap_chunk()
1699 Extend the top-most chunk by obtaining memory from system.
1718 INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; in malloc_extend_top()
1725 if (sbrk_base != (char*)(-1)) in malloc_extend_top()
1726 sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); in malloc_extend_top()
1739 && ((uintptr_t)old_end & (pagesz - 1)) == 0) in malloc_extend_top()
1746 if (sbrk_base == (char*)(-1)) /* First time through. Record base */ in malloc_extend_top()
1749 sbrked_mem += brk - (char*)old_end; in malloc_extend_top()
1755 correction = (MALLOC_ALIGNMENT) - front_misalign; in malloc_extend_top()
1762 correction += pagesz - ((uintptr_t)(brk + sbrk_size) & (pagesz - 1)); in malloc_extend_top()
1765 correction &= (pagesz - 1); in malloc_extend_top()
1775 new_brk -= (MALLOC_ALIGNMENT) - front_misalign; in malloc_extend_top()
1781 top_size = new_brk - brk + correction; in malloc_extend_top()
1791 if (old_top_size < MINSIZE) in malloc_extend_top()
1798 old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; in malloc_extend_top()
1800 chunk_at_offset(old_top, old_top_size )->size = in malloc_extend_top()
1802 chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = in malloc_extend_top()
1805 if (old_top_size >= MINSIZE) in malloc_extend_top()
1821 assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0 in malloc_extend_top()
1838 obtain 8-byte alignment and/or to obtain a size of at least
1839 MINSIZE (currently 16 bytes), the smallest allocatable size.
1840 (All fits are considered `exact' if they are within MINSIZE bytes.)
1851 whenever possible. This limited use of a first-fit style
1858 any remainder. This search is strictly by best-fit; i.e.,
1864 the best-fit search rule. In effect, `top' is treated as
1877 Memory is gathered from the system (in system page-sized
1887 chunk borders either a previously allocated and still in-use chunk,
1936 /* Also scan the next one, since it would have a remainder < MINSIZE */ in mALLOc()
1961 for (victim = last(bin); victim != bin; victim = victim->bk) in mALLOc()
1966 if (remainder_size >= (long)MINSIZE) /* too big */ in mALLOc()
1968 --idx; /* adjust to rescan below after checking last remainder */ in mALLOc()
1986 /* Try to use the last split-off remainder */ in mALLOc()
1988 if ( (victim = last_remainder->fd) != last_remainder) in mALLOc()
1993 if (remainder_size >= (long)MINSIZE) /* re-split */ in mALLOc()
2021 If there are any possibly nonempty big-enough blocks, in mALLOc()
2033 idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; in mALLOc()
2053 for (victim = last(bin); victim != bin; victim = victim->bk) in mALLOc()
2058 if (remainder_size >= (long)MINSIZE) /* split */ in mALLOc()
2091 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0); in mALLOc()
2097 if ((startidx & (BINBLOCKWIDTH - 1)) == 0) in mALLOc()
2102 --startidx; in mALLOc()
2126 if (chunksize(top) < nb || remainder_size < (long)MINSIZE) in mALLOc()
2142 if (chunksize(top) < nb || remainder_size < (long)MINSIZE) in mALLOc()
2211 hd = p->size; in fREe()
2234 prevsz = p->prev_size; in fREe()
2235 p = chunk_at_offset(p, -prevsz); in fREe()
2254 prevsz = p->prev_size; in fREe()
2255 p = chunk_at_offset(p, -prevsz); in fREe()
2258 if (p->fd == last_remainder) /* keep as last_remainder */ in fREe()
2268 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ in fREe()
2290 #pragma GCC diagnostic ignored "-Wmissing-attributes"
2309 chunk can be extended, it is, else a malloc-copy-free sequence is
2319 size argument of zero (re)allocates a minimum-sized chunk.
2325 The old unix realloc convention of allowing the last-free'd chunk
2399 if(oldsize - SIZE_SZ >= nb) in rEALLOc()
2411 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); in rEALLOc()
2433 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) in rEALLOc()
2437 set_head(top, (newsize - nb) | PREV_INUSE); in rEALLOc()
2472 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) in rEALLOc()
2478 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); in rEALLOc()
2480 set_head(top, (newsize - nb) | PREV_INUSE); in rEALLOc()
2495 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); in rEALLOc()
2507 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); in rEALLOc()
2533 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); in rEALLOc()
2544 if (remainder_size >= (long)MINSIZE) /* split off remainder */ in rEALLOc()
2580 8-byte alignment is guaranteed by normal malloc calls, so don't
2606 if (alignment < MINSIZE) alignment = MINSIZE; in mEMALIGn()
2613 if (nb > __SIZE_MAX__ - (alignment + MINSIZE) || nb < bytes) in mEMALIGn()
2619 m = (char*)(mALLOc(nb + alignment + MINSIZE)); in mEMALIGn()
2642 least MINSIZE, if the first calculation places us at in mEMALIGn()
2643 a spot with less than MINSIZE leader, we can move to the in mEMALIGn()
2644 next aligned spot -- we've allocated enough total room so that in mEMALIGn()
2648 brk = (char*)mem2chunk(((uintptr_t)(m + alignment - 1)) & -alignment); in mEMALIGn()
2649 if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment; in mEMALIGn()
2652 leadsize = brk - (char*)(p); in mEMALIGn()
2653 newsize = chunksize(p) - leadsize; in mEMALIGn()
2658 newp->prev_size = p->prev_size + leadsize; in mEMALIGn()
2680 if (remainder_size >= (long)MINSIZE) in mEMALIGn()
2725 if (bytes > __SIZE_MAX__ - pagesize) in pvALLOc()
2730 return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); in pvALLOc()
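For concreteness, the rounding above works out as follows, assuming
4096-byte pages:

/*   (1    + 4095) & ~4095 == 4096
     (4097 + 4095) & ~4095 == 8192
   The preceding guard rejects sizes so large that adding
   pagesize - 1 would wrap past zero. */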
2800 /* clear only the bytes from non-freshly-sbrked memory */ in cALLOc()
2806 MALLOC_ZERO(mem, csz - SIZE_SZ); in cALLOc()
2840 memory to potentially reduce the system-level memory requirements
2849 structures will be left (one page or less). Non-zero arguments
2851 future expected allocations without having to re-obtain memory
2860 long top_size; /* Amount of top-most memory */ in malloc_trim()
2862 char* current_brk; /* address returned by pre-check sbrk call */ in malloc_trim()
2870 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; in malloc_trim()
2890 new_brk = (char*)(MORECORE (-extra)); in malloc_trim()
2896 top_size = current_brk - (char*)top; in malloc_trim()
2897 if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ in malloc_trim()
2899 sbrked_mem = current_brk - sbrk_base; in malloc_trim()
2910 set_head(top, (top_size - extra) | PREV_INUSE); in malloc_trim()
2911 sbrked_mem -= extra; in malloc_trim()
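A worked instance of the `extra' computation in malloc_trim() above,
with assumed figures (pagesz = 4096, pad = 0, MINSIZE = 16,
top_size = 20000):

/*   (20000 - 0 - 16 + 4095) / 4096 - 1  ==  5 - 1  ==  4
     extra = 4 * 4096                    ==  16384
   Whole pages are returned while at least pad + MINSIZE bytes
   (3616 remain here) are always kept in top. */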
2951 return chunksize(p) - SIZE_SZ; in malloc_usable_size()
2953 return chunksize(p) - 2*SIZE_SZ; in malloc_usable_size()
2976 int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; in malloc_update_mallinfo()
2981 for (p = last(b); p != b; p = p->bk) in malloc_update_mallinfo()
2986 q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; in malloc_update_mallinfo()
2996 current_mallinfo.uordblks = sbrked_mem - avail; in malloc_update_mallinfo()
3095 The format is to provide a (parameter-number, parameter-value) pair.
3138 if ((align & (align-1)) != 0 || align % sizeof(void *) != 0 || align == 0) in pOSIx_mEMALIGn()
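The test above encodes POSIX's rules for the alignment argument: it must
be a nonzero power of two and a multiple of sizeof(void *). A usage
sketch via the public entry point:

#include <stdlib.h>

void demo_posix_memalign(void)
{
    void *p;
    if (posix_memalign(&p, 64, 1000) == 0)  /* ok: power of two, multiple
                                               of sizeof(void *) */
        free(p);
    if (posix_memalign(&p, 24, 1000) != 0)  /* EINVAL: 24 is not a power
                                               of two */
        ;  /* p is left untouched on failure */
}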
3156 * Fixed ordering problem with boundary-stamping
3165 * Add linux mremap support code from HJ Liu
3170 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3173 * Use ordered bins instead of best-fit threshold
3174 * Eliminate block-local decls to simplify tracing and debugging.
3176 * Fix error occurring when initial sbrk_base not word-aligned.
3183 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3184 * Added macros etc., allowing use in linux libc from
3189 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
3196 * Use best fit for very large chunks to prevent some worst-cases.
3205 (wmglo@Dent.MED.Uni-Muenchen.DE).
3211 * malloc: swap order of clean-bin strategy;
3224 * Add stuff to allow compilation on non-ANSI compilers
3232 * tested on sparc, hp-700, dec-mips, rs6000
3237 * Based loosely on libg++-1.2X malloc. (It retains some of the overall