1 /*
2 Copyright (c) 1990 Regents of the University of California.
3 All rights reserved.
4  */
5 #ifdef MALLOC_PROVIDED
6 int _dummy_mallocr = 1;
7 #else
8 /* ---------- To make a malloc.h, start cutting here ------------ */
9 
10 /*
11   A version of malloc/free/realloc written by Doug Lea and released to the
12   public domain.  Send questions/comments/complaints/performance data
13   to dl@cs.oswego.edu
14 
15 * VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)
16 
17    Note: There may be an updated version of this malloc obtainable at
18            ftp://g.oswego.edu/pub/misc/malloc.c
19          Check before installing!
20 
21    Note: This version differs from 2.6.4 only by correcting a
22          statement ordering error that could cause failures only
23          when calls to this malloc are interposed with calls to
24          other memory allocators.
25 
26 * Why use this malloc?
27 
28   This is not the fastest, most space-conserving, most portable, or
29   most tunable malloc ever written. However it is among the fastest
30   while also being among the most space-conserving, portable and tunable.
31   Consistent balance across these factors results in a good general-purpose
32   allocator. For a high-level description, see
33      http://g.oswego.edu/dl/html/malloc.html
34 
35 * Synopsis of public routines
36 
37   (Much fuller descriptions are contained in the program documentation below.)
38 
39   malloc(size_t n);
40      Return a pointer to a newly allocated chunk of at least n bytes, or null
41      if no space is available.
42   free(Void_t* p);
43      Release the chunk of memory pointed to by p, or no effect if p is null.
44   realloc(Void_t* p, size_t n);
45      Return a pointer to a chunk of size n that contains the same data
46      as does chunk p up to the minimum of (n, p's size) bytes, or null
47      if no space is available. The returned pointer may or may not be
48      the same as p. If p is null, equivalent to malloc.  Unless the
49      #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
50      size argument of zero (re)allocates a minimum-sized chunk.
51   memalign(size_t alignment, size_t n);
52      Return a pointer to a newly allocated chunk of n bytes, aligned
53      in accord with the alignment argument, which must be a power of
54      two.
55   valloc(size_t n);
56      Equivalent to memalign(pagesize, n), where pagesize is the page
57      size of the system (or as near to this as can be figured out from
58      all the includes/defines below.)
59   pvalloc(size_t n);
60      Equivalent to valloc(minimum-page-that-holds(n)), that is,
61      round up n to nearest pagesize.
62   calloc(size_t unit, size_t quantity);
63      Returns a pointer to quantity * unit bytes, with all locations
64      set to zero.
65   cfree(Void_t* p);
66      Equivalent to free(p).
67   malloc_trim(size_t pad);
68      Release all but pad bytes of freed top-most memory back
69      to the system. Return 1 if successful, else 0.
70   malloc_usable_size(Void_t* p);
71      Report the number usable allocated bytes associated with allocated
72      chunk p. This may or may not report more bytes than were requested,
73      due to alignment and minimum size constraints.
74   malloc_stats();
75      Prints brief summary statistics on stderr.
76   mallinfo()
77      Returns (by copy) a struct containing various summary statistics.
78   mallopt(int parameter_number, int parameter_value)
79      Changes one of the tunable parameters described below. Returns
80      1 if successful in changing the parameter, else 0.
81 
82 * Vital statistics:
83 
84   Alignment:                            8-byte
85        8 byte alignment is currently hardwired into the design.  This
86        seems to suffice for all current machines and C compilers.
87 
88   Assumed pointer representation:       4 or 8 bytes
89        Code for 8-byte pointers is untested by me but has worked
90        reliably by Wolfram Gloger, who contributed most of the
91        changes supporting this.
92 
93   Assumed size_t  representation:       4 or 8 bytes
94        Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 
96   Minimum overhead per allocated chunk: 4 or 8 bytes
97        Each malloced chunk has a hidden overhead of 4 bytes holding size
98        and status information.
99 
100   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
101                           8-byte ptrs:  24/32 bytes (including, 4/8 overhead)
102 
103        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
104        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
105        needed; 4 (8) for a trailing size field
106        and 8 (16) bytes for free list pointers. Thus, the minimum
107        allocatable size is 16/24/32 bytes.
108 
109        Even a request for zero bytes (i.e., malloc(0)) returns a
110        pointer to something of the minimum allocatable size.
111 
112   Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
113                           8-byte size_t: 2^63 - 16 bytes
114 
115        It is assumed that (possibly signed) size_t bit values suffice to
116        represent chunk sizes. `Possibly signed' is due to the fact
117        that `size_t' may be defined on a system as either a signed or
118        an unsigned type. To be conservative, values that would appear
119        as negative numbers are avoided.
120        Requests for sizes with a negative sign bit will return a
121        minimum-sized chunk.
122 
123   Maximum overhead wastage per allocated chunk: normally 15 bytes
124 
125        Alignnment demands, plus the minimum allocatable size restriction
126        make the normal worst-case wastage 15 bytes (i.e., up to 15
127        more bytes will be allocated than were requested in malloc), with
128        two exceptions:
129          1. Because requests for zero bytes allocate non-zero space,
130             the worst case wastage for a request of zero bytes is 24 bytes.
131          2. For requests >= mmap_threshold that are serviced via
132             mmap(), the worst case wastage is 8 bytes plus the remainder
133             from a system page (the minimal mmap unit); typically 4096 bytes.
134 
135 * Limitations
136 
137     Here are some features that are NOT currently supported
138 
139     * No user-definable hooks for callbacks and the like.
140     * No automated mechanism for fully checking that all accesses
141       to malloced memory stay within their bounds.
142     * No support for compaction.
143 
144 * Synopsis of compile-time options:
145 
146     People have reported using previous versions of this malloc on all
147     versions of Unix, sometimes by tweaking some of the defines
148     below. It has been tested most extensively on Solaris and
149     Linux. It is also reported to work on WIN32 platforms.
150     People have also reported adapting this malloc for use in
151     stand-alone embedded systems.
152 
153     The implementation is in straight, hand-tuned ANSI C.  Among other
154     consequences, it uses a lot of macros.  Because of this, to be at
155     all usable, this code should be compiled using an optimizing compiler
156     (for example gcc -O2) that can simplify expressions and control
157     paths.
158 
159   __STD_C                  (default: derived from C compiler defines)
160      Nonzero if using ANSI-standard C compiler, a C++ compiler, or
161      a C compiler sufficiently close to ANSI to get away with it.
162   DEBUG                    (default: NOT defined)
163      Define to enable debugging. Adds fairly extensive assertion-based
164      checking to help track down memory errors, but noticeably slows down
165      execution.
166   SEPARATE_OBJECTS	   (default: NOT defined)
167      Define this to compile into separate .o files.  You must then
168      compile malloc.c several times, defining a DEFINE_* macro each
169      time.  The list of DEFINE_* macros appears below.
170   MALLOC_LOCK		   (default: NOT defined)
171   MALLOC_UNLOCK		   (default: NOT defined)
172      Define these to C expressions which are run to lock and unlock
173      the malloc data structures.  Calls may be nested; that is,
174      MALLOC_LOCK may be called more than once before the corresponding
175      MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
176      that it already holds.
177   MALLOC_ALIGNMENT          (default: NOT defined)
178      Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
179      which is the normal default.
180   REALLOC_ZERO_BYTES_FREES (default: NOT defined)
181      Define this if you think that realloc(p, 0) should be equivalent
182      to free(p). Otherwise, since malloc returns a unique pointer for
183      malloc(0), so does realloc(p, 0).
184   HAVE_MEMCPY               (default: defined)
185      Define if you are not otherwise using ANSI STD C, but still
186      have memcpy and memset in your C library and want to use them.
187      Otherwise, simple internal versions are supplied.
188   USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
189      Define as 1 if you want the C library versions of memset and
190      memcpy called in realloc and calloc (otherwise macro versions are used).
191      At least on some platforms, the simple macro versions usually
192      outperform libc versions.
193   HAVE_MMAP                 (default: defined as 1)
194      Define to non-zero to optionally make malloc() use mmap() to
195      allocate very large blocks.
196   HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
197      Define to non-zero to optionally make realloc() use mremap() to
198      reallocate very large blocks.
199   malloc_getpagesize        (default: derived from system #includes)
200      Either a constant or routine call returning the system page size.
201   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
202      Optionally define if you are on a system with a /usr/include/malloc.h
203      that declares struct mallinfo. It is not at all necessary to
204      define this even if you do, but will ensure consistency.
205   INTERNAL_SIZE_T           (default: size_t)
206      Define to a 32-bit type (probably `unsigned int') if you are on a
207      64-bit machine, yet do not want or need to allow malloc requests of
208      greater than 2^31 to be handled. This saves space, especially for
209      very small chunks.
210   INTERNAL_LINUX_C_LIB      (default: NOT defined)
211      Defined only when compiled as part of Linux libc.
212      Also note that there is some odd internal name-mangling via defines
213      (for example, internally, `malloc' is named `mALLOc') needed
214      when compiling in this case. These look funny but don't otherwise
215      affect anything.
216   _LIBC	                    (default: NOT defined)
217      Defined only when compiled as part of the Cygnus newlib
218      distribution.
219   WIN32                     (default: undefined)
220      Define this on MS win (95, nt) platforms to compile in sbrk emulation.
221   LACKS_UNISTD_H            (default: undefined)
222      Define this if your system does not have a <unistd.h>.
223   MORECORE                  (default: sbrk)
224      The name of the routine to call to obtain more memory from the system.
225   MORECORE_FAILURE          (default: -1)
226      The value returned upon failure of MORECORE.
227   MORECORE_CLEARS           (default 1)
228      True (1) if the routine mapped to MORECORE zeroes out memory (which
229      holds for sbrk).
230   DEFAULT_TRIM_THRESHOLD
231   DEFAULT_TOP_PAD
232   DEFAULT_MMAP_THRESHOLD
233   DEFAULT_MMAP_MAX
234      Default values of tunable parameters (described in detail below)
235      controlling interaction with host system routines (sbrk, mmap, etc).
236      These values may also be changed dynamically via mallopt(). The
237      preset defaults are those that give best performance for typical
238      programs/systems.
239 
240 
241 */
242 
243 
244 
245 
246 /* Preliminaries */
247 
248 #ifdef __GNUC__
249 #pragma GCC diagnostic ignored "-Wpragmas"
250 #pragma GCC diagnostic ignored "-Wunknown-warning-option"
251 #pragma GCC diagnostic ignored "-Wanalyzer-malloc-leak"
252 #pragma GCC diagnostic ignored "-Wanalyzer-use-of-uninitialized-value"
253 #pragma GCC diagnostic ignored "-Wanalyzer-out-of-bounds"
254 #pragma GCC diagnostic ignored "-Warray-bounds"
255 #pragma GCC diagnostic ignored "-Wanalyzer-null-dereference"
256 #endif
257 
258 #define _DEFAULT_SOURCE
259 
260 #include <stddef.h>   /* for size_t */
261 #include <stdio.h>    /* needed for malloc_stats */
262 #include <limits.h>   /* needed for overflow checks */
263 #include <errno.h>    /* needed to set errno to ENOMEM */
264 #include <string.h>   /* for memmove */
265 #include <stdint.h>   /* for uintptr_t */
266 #include <unistd.h>   /* for sbrk */
267 #include <sys/lock.h>
268 
269 /*
270   Compile-time options
271 */
272 
273 #define SEPARATE_OBJECTS
274 #define HAVE_MMAP 0
275 #define MORECORE(size) sbrk((size))
276 #define MORECORE_CLEARS 0
277 #define MALLOC_LOCK __LIBC_LOCK()
278 #define MALLOC_UNLOCK __LIBC_UNLOCK()
279 
280 
281 #ifdef SMALL_MEMORY
282 #define malloc_getpagesize (128)
283 #else
284 #define malloc_getpagesize (4096)
285 #endif
286 
287 /*
288     Debugging:
289 
290     Because freed chunks may be overwritten with link fields, this
291     malloc will often die when freed memory is overwritten by user
292     programs.  This can be very effective (albeit in an annoying way)
293     in helping track down dangling pointers.
294 
295     If you compile with -DDEBUG, a number of assertion checks are
296     enabled that will catch more memory errors. You probably won't be
297     able to make much sense of the actual assertion errors, but they
298     should help you locate incorrectly overwritten memory.  The
299     checking is fairly extensive, and will slow down execution
300     noticeably. Calling malloc_stats or mallinfo with DEBUG set will
301     attempt to check every non-mmapped allocated and free chunk in the
302     course of computing the summmaries. (By nature, mmapped regions
303     cannot be checked very much automatically.)
304 
305     Setting DEBUG may also be helpful if you are trying to modify
306     this code. The assertions in the check routines spell out in more
307     detail the assumptions and invariants underlying the algorithms.
308 
309 */
310 
311 #if DEBUG
312 #include <assert.h>
313 #else
314 #define assert(x) ((void)0)
315 #endif
316 
317 /*
318   SEPARATE_OBJECTS should be defined if you want each function to go
319   into a separate .o file.  You must then compile malloc.c once per
320   function, defining the appropriate DEFINE_ macro.  See below for the
321   list of macros.
322  */
323 
324 #ifndef SEPARATE_OBJECTS
325 #define DEFINE_MALLOC
326 #define DEFINE_FREE
327 #define DEFINE_REALLOC
328 #define DEFINE_CALLOC
329 #define DEFINE_CFREE
330 #define DEFINE_MEMALIGN
331 #define DEFINE_VALLOC
332 #define DEFINE_PVALLOC
333 #define DEFINE_MALLINFO
334 #define DEFINE_MALLOC_STATS
335 #define DEFINE_MALLOC_USABLE_SIZE
336 #define DEFINE_MALLOPT
337 
338 #define STATIC static
339 #else
340 #define STATIC
341 #endif
342 
343 /*
344   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
345   of chunk sizes. On a 64-bit machine, you can reduce malloc
346   overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
347   at the expense of not being able to handle requests greater than
348   2^31. This limitation is hardly ever a concern; you are encouraged
349   to set this. However, the default version is the same as size_t.
350   Since the implementation relies on __builtin_mul_overflow, defining
351   a custom INTERNAL_SIZE_T on machines/compilers without
352   __builtin_mul_overflow is not permitted.
353 */
354 
355 #ifndef INTERNAL_SIZE_T
356 #define INTERNAL_SIZE_T size_t
357 #elif !defined(_HAVE_BUILTIN_MUL_OVERFLOW)
358 #error Compiler does not support __builtin_mul_overflow, hence INTERNAL_SIZE_T cannot be set
359 #endif
360 
361 /*
362   Following is needed on implementations whereby long > size_t.
363   The problem is caused because the code performs subtractions of
364   size_t values and stores the result in long values.  In the case
365   where long > size_t and the first value is actually less than
366   the second value, the resultant value is positive.  For example,
367   (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
368   which is 2*31 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
369   fact that assignment from unsigned to signed won't sign extend.
370 */
371 
372 #define long_sub_size_t(x, y)				\
373   (sizeof (long) > sizeof (INTERNAL_SIZE_T) && x < y	\
374    ? -(long) (y - x)					\
375    : (long) (x - y))
376 
377 /*
378   REALLOC_ZERO_BYTES_FREES should be set if a call to
379   realloc with zero bytes should be the same as a call to free.
380   Some people think it should. Otherwise, since this malloc
381   returns a unique pointer for malloc(0), so does realloc(p, 0).
382 */
383 
384 
385 /*   #define REALLOC_ZERO_BYTES_FREES */
386 
387 
388 /*
389   USE_MEMMOVE should be defined as 1 if you actually want to
390   have memset and memmove called. People report that the macro
391   versions are often enough faster than libc versions on many
392   systems that it is better to use them.
393 */
394 
395 #ifndef USE_MEMMOVE
396 #define USE_MEMMOVE 1
397 #endif
398 
399 #if USE_MEMMOVE
400 
401 /* The following macros are only invoked with (2n+1)-multiples of
402    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
403    for fast inline execution when n is small. */
404 
405 #define MALLOC_ZERO(charp, nbytes)                                            \
406 do {                                                                          \
407   INTERNAL_SIZE_T mzsz = (nbytes);                                            \
408   if(mzsz <= 9*sizeof(mzsz)) {                                                \
409     INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
410     if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
411                                      *mz++ = 0;                               \
412       if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
413                                      *mz++ = 0;                               \
414         if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
415                                      *mz++ = 0; }}}                           \
416                                      *mz++ = 0;                               \
417                                      *mz++ = 0;                               \
418                                      *mz   = 0;                               \
419   } else memset((charp), 0, mzsz);                                            \
420 } while(0)
421 
422 #define MALLOC_COPY(dest,src,nbytes)                                          \
423 do {                                                                          \
424   INTERNAL_SIZE_T mcsz = (nbytes);                                            \
425   if(mcsz <= 9*sizeof(mcsz)) {                                                \
426     INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
427     INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
428     if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
429                                      *mcdst++ = *mcsrc++;                     \
430       if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
431                                      *mcdst++ = *mcsrc++;                     \
432         if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
433                                      *mcdst++ = *mcsrc++; }}}                 \
434                                      *mcdst++ = *mcsrc++;                     \
435                                      *mcdst++ = *mcsrc++;                     \
436                                      *mcdst   = *mcsrc  ;                     \
437   } else memmove(dest, src, mcsz);                                             \
438 } while(0)
439 
440 #else /* !USE_MEMMOVE */
441 
442 /* Use Duff's device for good zeroing/copying performance. */
443 
444 #define MALLOC_ZERO(charp, nbytes)                                            \
445 do {                                                                          \
446   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
447   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
448   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
449   switch (mctmp) {                                                            \
450     case 0: for(;;) { *mzp++ = 0;                                             \
451     case 7:           *mzp++ = 0;                                             \
452     case 6:           *mzp++ = 0;                                             \
453     case 5:           *mzp++ = 0;                                             \
454     case 4:           *mzp++ = 0;                                             \
455     case 3:           *mzp++ = 0;                                             \
456     case 2:           *mzp++ = 0;                                             \
457     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
458   }                                                                           \
459 } while(0)
460 
461 #define MALLOC_COPY(dest,src,nbytes)                                          \
462 do {                                                                          \
463   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
464   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
465   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
466   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
467   switch (mctmp) {                                                            \
468     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
469     case 7:           *mcdst++ = *mcsrc++;                                    \
470     case 6:           *mcdst++ = *mcsrc++;                                    \
471     case 5:           *mcdst++ = *mcsrc++;                                    \
472     case 4:           *mcdst++ = *mcsrc++;                                    \
473     case 3:           *mcdst++ = *mcsrc++;                                    \
474     case 2:           *mcdst++ = *mcsrc++;                                    \
475     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
476   }                                                                           \
477 } while(0)
478 
479 #endif
480 
481 
482 /*
483   Define HAVE_MMAP to optionally make malloc() use mmap() to
484   allocate very large blocks.  These will be returned to the
485   operating system immediately after a free().
486 */
487 
488 #ifndef HAVE_MMAP
489 #define HAVE_MMAP 1
490 #endif
491 
492 /*
493   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
494   large blocks.  This is currently only possible on Linux with
495   kernel versions newer than 1.3.77.
496 */
497 
498 #ifndef HAVE_MREMAP
499 #ifdef INTERNAL_LINUX_C_LIB
500 #define HAVE_MREMAP 1
501 #else
502 #define HAVE_MREMAP 0
503 #endif
504 #endif
505 
506 #if HAVE_MMAP
507 
508 #include <fcntl.h>
509 #include <sys/mman.h>
510 
511 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
512 #define MAP_ANONYMOUS MAP_ANON
513 #endif
514 
515 #endif /* HAVE_MMAP */
516 
517 /*
518   Access to system page size. To the extent possible, this malloc
519   manages memory from the system in page-size units.
520 
521   The following mechanics for getpagesize were adapted from
522   bsd/gnu getpagesize.h
523 */
524 
525 #ifndef malloc_getpagesize
526 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
527 #    ifndef _SC_PAGE_SIZE
528 #      define _SC_PAGE_SIZE _SC_PAGESIZE
529 #    endif
530 #  endif
531 #  ifdef _SC_PAGE_SIZE
532 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
533 #  else
534 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
535        extern size_t getpagesize();
536 #      define malloc_getpagesize getpagesize()
537 #    else
538 #      include <sys/param.h>
539 #      ifdef EXEC_PAGESIZE
540 #        define malloc_getpagesize EXEC_PAGESIZE
541 #      else
542 #        ifdef NBPG
543 #          ifndef CLSIZE
544 #            define malloc_getpagesize NBPG
545 #          else
546 #            define malloc_getpagesize (NBPG * CLSIZE)
547 #          endif
548 #        else
549 #          ifdef NBPC
550 #            define malloc_getpagesize NBPC
551 #          else
552 #            ifdef PAGESIZE
553 #              define malloc_getpagesize PAGESIZE
554 #            else
555 #              define malloc_getpagesize (4096) /* just guess */
556 #            endif
557 #          endif
558 #        endif
559 #      endif
560 #    endif
561 #  endif
562 #endif
563 
564 
565 
566 /*
567 
568   This version of malloc supports the standard SVID/XPG mallinfo
569   routine that returns a struct containing the same kind of
570   information you can get from malloc_stats. It should work on
571   any SVID/XPG compliant system that has a /usr/include/malloc.h
572   defining struct mallinfo. (If you'd like to install such a thing
573   yourself, cut out the preliminary declarations as described above
574   and below and save them in a malloc.h file. But there's no
575   compelling reason to bother to do this.)
576 
577   The main declaration needed is the mallinfo struct that is returned
578   (by-copy) by mallinfo().  The SVID/XPG malloinfo struct contains a
579   bunch of fields, most of which are not even meaningful in this
580   version of malloc. Some of these fields are are instead filled by
581   mallinfo() with other numbers that might possibly be of interest.
582 
583 */
584 
585 #include <malloc.h>
586 
587 /* mallopt options that actually do something */
588 
589 #define M_TRIM_THRESHOLD    -1
590 #define M_TOP_PAD           -2
591 #define M_MMAP_THRESHOLD    -3
592 #define M_MMAP_MAX          -4
593 
594 
595 
596 #ifndef DEFAULT_TRIM_THRESHOLD
597 #define DEFAULT_TRIM_THRESHOLD (128L * 1024L)
598 #endif
599 
600 /*
601     M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
602       to keep before releasing via malloc_trim in free().
603 
604       Automatic trimming is mainly useful in long-lived programs.
605       Because trimming via sbrk can be slow on some systems, and can
606       sometimes be wasteful (in cases where programs immediately
607       afterward allocate more large chunks) the value should be high
608       enough so that your overall system performance would improve by
609       releasing.
610 
611       The trim threshold and the mmap control parameters (see below)
612       can be traded off with one another. Trimming and mmapping are
613       two different ways of releasing unused memory back to the
614       system. Between these two, it is often possible to keep
615       system-level demands of a long-lived program down to a bare
616       minimum. For example, in one test suite of sessions measuring
617       the XF86 X server on Linux, using a trim threshold of 128K and a
618       mmap threshold of 192K led to near-minimal long term resource
619       consumption.
620 
621       If you are using this malloc in a long-lived program, it should
622       pay to experiment with these values.  As a rough guide, you
623       might set to a value close to the average size of a process
624       (program) running on your system.  Releasing this much memory
625       would allow such a process to run in memory.  Generally, it's
626       worth it to tune for trimming rather tham memory mapping when a
627       program undergoes phases where several large chunks are
628       allocated and released in ways that can reuse each other's
629       storage, perhaps mixed with phases where there are no such
630       chunks at all.  And in well-behaved long-lived programs,
631       controlling release of large blocks via trimming versus mapping
632       is usually faster.
633 
634       However, in most programs, these parameters serve mainly as
635       protection against the system-level effects of carrying around
636       massive amounts of unneeded memory. Since frequent calls to
637       sbrk, mmap, and munmap otherwise degrade performance, the default
638       parameters are set to relatively high values that serve only as
639       safeguards.
640 
641       The default trim value is high enough to cause trimming only in
642       fairly extreme (by current memory consumption standards) cases.
643       It must be greater than page size to have any useful effect.  To
644       disable trimming completely, you can set to (unsigned long)(-1);
645 
646 
647 */
648 
649 
650 #ifndef DEFAULT_TOP_PAD
651 #define DEFAULT_TOP_PAD        (0)
652 #endif
653 
654 /*
655     M_TOP_PAD is the amount of extra `padding' space to allocate or
656       retain whenever sbrk is called. It is used in two ways internally:
657 
658       * When sbrk is called to extend the top of the arena to satisfy
659         a new malloc request, this much padding is added to the sbrk
660         request.
661 
662       * When malloc_trim is called automatically from free(),
663         it is used as the `pad' argument.
664 
665       In both cases, the actual amount of padding is rounded
666       so that the end of the arena is always a system page boundary.
667 
668       The main reason for using padding is to avoid calling sbrk so
669       often. Having even a small pad greatly reduces the likelihood
670       that nearly every malloc request during program start-up (or
671       after trimming) will invoke sbrk, which needlessly wastes
672       time.
673 
674       Automatic rounding-up to page-size units is normally sufficient
675       to avoid measurable overhead, so the default is 0.  However, in
676       systems where sbrk is relatively slow, it can pay to increase
677       this value, at the expense of carrying around more memory than
678       the program needs.
679 
680 */
681 
682 
683 #ifndef DEFAULT_MMAP_THRESHOLD
684 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
685 #endif
686 
687 /*
688 
689     M_MMAP_THRESHOLD is the request size threshold for using mmap()
690       to service a request. Requests of at least this size that cannot
691       be allocated using already-existing space will be serviced via mmap.
692       (If enough normal freed space already exists it is used instead.)
693 
694       Using mmap segregates relatively large chunks of memory so that
695       they can be individually obtained and released from the host
696       system. A request serviced through mmap is never reused by any
697       other request (at least not directly; the system may just so
698       happen to remap successive requests to the same locations).
699 
700       Segregating space in this way has the benefit that mmapped space
701       can ALWAYS be individually released back to the system, which
702       helps keep the system level memory demands of a long-lived
703       program low. Mapped memory can never become `locked' between
704       other chunks, as can happen with normally allocated chunks, which
705       menas that even trimming via malloc_trim would not release them.
706 
707       However, it has the disadvantages that:
708 
709          1. The space cannot be reclaimed, consolidated, and then
710             used to service later requests, as happens with normal chunks.
711          2. It can lead to more wastage because of mmap page alignment
712             requirements
713          3. It causes malloc performance to be more dependent on host
714             system memory management support routines which may vary in
715             implementation quality and may impose arbitrary
716             limitations. Generally, servicing a request via normal
717             malloc steps is faster than going through a system's mmap.
718 
719       All together, these considerations should lead you to use mmap
720       only for relatively large requests.
721 
722 
723 */
724 
725 
726 
727 #ifndef DEFAULT_MMAP_MAX
728 #if HAVE_MMAP
729 #define DEFAULT_MMAP_MAX       (64)
730 #else
731 #define DEFAULT_MMAP_MAX       (0)
732 #endif
733 #endif
734 
735 /*
736     M_MMAP_MAX is the maximum number of requests to simultaneously
737       service using mmap. This parameter exists because:
738 
739          1. Some systems have a limited number of internal tables for
740             use by mmap.
741          2. In most systems, overreliance on mmap can degrade overall
742             performance.
743          3. If a program allocates many large regions, it is probably
744             better off using normal sbrk-based allocation routines that
745             can reclaim and reallocate normal heap memory. Using a
746             small value allows transition into this mode after the
747             first few allocations.
748 
749       Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
750       the default value is 0, and attempts to set it to non-zero values
751       in mallopt will fail.
752 */
753 
754 
755 
756 
757 /*
758 
759   Special defines for linux libc
760 
761   Except when compiled using these special defines for Linux libc
762   using weak aliases, this malloc is NOT designed to work in
763   multithreaded applications.  No semaphores or other concurrency
764   control are provided to ensure that multiple malloc or free calls
765   don't run at the same time, which could be disasterous. A single
766   semaphore could be used across malloc, realloc, and free (which is
767   essentially the effect of the linux weak alias approach). It would
768   be hard to obtain finer granularity.
769 
770 */
771 
772 
773 #ifndef MORECORE
774 #define MORECORE sbrk
775 #endif
776 
777 #ifndef MORECORE_FAILURE
778 #define MORECORE_FAILURE -1
779 #endif
780 
781 #ifndef MORECORE_CLEARS
782 #define MORECORE_CLEARS 1
783 #endif
784 
785 #define cALLOc		calloc
786 #define fREe		free
787 #define mALLOc		malloc
788 #define mEMALIGn	memalign
789 #define rEALLOc		realloc
790 #define vALLOc		valloc
791 #define pvALLOc		pvalloc
792 #define mALLINFo	mallinfo
793 #define mALLOPt		mallopt
794 #define pOSIx_mEMALIGn	posix_memalign
795 
796 
797 #define malloc_stats			malloc_stats
798 #define malloc_trim			malloc_trim
799 #define malloc_usable_size		malloc_usable_size
800 
801 #define malloc_update_mallinfo		__malloc_update_mallinfo
802 
803 #define malloc_av_			__malloc_av_
804 #define malloc_current_mallinfo		__malloc_current_mallinfo
805 #define malloc_max_sbrked_mem		__malloc_max_sbrked_mem
806 #define malloc_max_total_mem		__malloc_max_total_mem
807 #define malloc_sbrk_base		__malloc_sbrk_base
808 #define malloc_top_pad			__malloc_top_pad
809 #define malloc_trim_threshold		__malloc_trim_threshold
810 
811 /* Public routines */
812 
813 
814 void* mALLOc(size_t);
815 void    fREe(void*);
816 void* rEALLOc(void*, size_t);
817 void* mEMALIGn(size_t, size_t);
818 void* vALLOc(size_t);
819 void* pvALLOc(size_t);
820 void* cALLOc(size_t, size_t);
821 void    cfree(void*);
822 int     malloc_trim(size_t);
823 size_t  malloc_usable_size(void*);
824 void    malloc_stats(void);
825 int     mALLOPt(int, int);
826 struct mallinfo mALLINFo(void);
827 int     pOSIx_mEMALIGn(void **, size_t, size_t);
828 
829 /* Work around compiler optimizing away stores to 'size' field before
830  * call to free.
831  */
832 #ifdef _HAVE_ALIAS_ATTRIBUTE
833 extern __typeof(free) __malloc_free;
834 #else
835 #define __malloc_free(x) fREe(x)
836 #endif
837 
838 
839 /* ---------- To make a malloc.h, end cutting here ------------ */
840 
841 
842 /*
843   Emulation of sbrk for WIN32
844   All code within the ifdef WIN32 is untested by me.
845 */
846 
847 
848 
849 
850 
851 /*
852   Type declarations
853 */
854 
855 
856 struct malloc_chunk
857 {
858   INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
859   INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
860   struct malloc_chunk* fd;   /* double links -- used only if free. */
861   struct malloc_chunk* bk;
862 };
863 
864 typedef struct malloc_chunk* mchunkptr;
865 
866 /*
867 
868    malloc_chunk details:
869 
870     (The following includes lightly edited explanations by Colin Plumb.)
871 
872     Chunks of memory are maintained using a `boundary tag' method as
873     described in e.g., Knuth or Standish.  (See the paper by Paul
874     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
875     survey of such techniques.)  Sizes of free chunks are stored both
876     in the front of each chunk and at the end.  This makes
877     consolidating fragmented chunks into bigger chunks very fast.  The
878     size fields also hold bits representing whether chunks are free or
879     in use.
880 
881     An allocated chunk looks like this:
882 
883 
884     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
885             |             Size of previous chunk, if allocated            | |
886             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
887             |             Size of chunk, in bytes                         |P|
888       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
889             |             User data starts here...                          .
890             .                                                               .
891             .             (malloc_usable_space() bytes)                     .
892             .                                                               |
893 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
894             |             Size of chunk                                     |
895             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
896 
897 
898     Where "chunk" is the front of the chunk for the purpose of most of
899     the malloc code, but "mem" is the pointer that is returned to the
900     user.  "Nextchunk" is the beginning of the next contiguous chunk.
901 
902     Chunks always begin on even word boundries, so the mem portion
903     (which is returned to the user) is also on an even word boundary, and
904     thus double-word aligned.
905 
906     Free chunks are stored in circular doubly-linked lists, and look like this:
907 
908     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
909             |             Size of previous chunk                            |
910             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
911     `head:' |             Size of chunk, in bytes                         |P|
912       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
913             |             Forward pointer to next chunk in list             |
914             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
915             |             Back pointer to previous chunk in list            |
916             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
917             |             Unused space (may be 0 bytes long)                .
918             .                                                               .
919             .                                                               |
920 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
921     `foot:' |             Size of chunk, in bytes                           |
922             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
923 
924     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
925     chunk size (which is always a multiple of two words), is an in-use
926     bit for the *previous* chunk.  If that bit is *clear*, then the
927     word before the current chunk size contains the previous chunk
928     size, and can be used to find the front of the previous chunk.
929     (The very first chunk allocated always has this bit set,
930     preventing access to non-existent (or non-owned) memory.)
931 
932     Note that the `foot' of the current chunk is actually represented
933     as the prev_size of the NEXT chunk. (This makes it easier to
934     deal with alignments etc).
935 
936     The two exceptions to all this are
937 
938      1. The special chunk `top', which doesn't bother using the
939         trailing size field since there is no
940         next contiguous chunk that would have to index off it. (After
941         initialization, `top' is forced to always exist.  If it would
942         become less than MINSIZE bytes long, it is replenished via
943         malloc_extend_top.)
944 
945      2. Chunks allocated via mmap, which have the second-lowest-order
946         bit (IS_MMAPPED) set in their size fields.  Because they are
947         never merged or traversed from any other chunk, they have no
948         foot size or inuse information.
949 
950     Available chunks are kept in any of several places (all declared below):
951 
952     * `av': An array of chunks serving as bin headers for consolidated
953        chunks. Each bin is doubly linked.  The bins are approximately
954        proportionally (log) spaced.  There are a lot of these bins
955        (128). This may look excessive, but works very well in
956        practice.  All procedures maintain the invariant that no
957        consolidated chunk physically borders another one. Chunks in
958        bins are kept in size order, with ties going to the
959        approximately least recently used chunk.
960 
961        The chunks in each bin are maintained in decreasing sorted order by
962        size.  This is irrelevant for the small bins, which all contain
963        the same-sized chunks, but facilitates best-fit allocation for
964        larger chunks. (These lists are just sequential. Keeping them in
965        order almost never requires enough traversal to warrant using
966        fancier ordered data structures.)  Chunks of the same size are
967        linked with the most recently freed at the front, and allocations
968        are taken from the back.  This results in LRU or FIFO allocation
969        order, which tends to give each chunk an equal opportunity to be
970        consolidated with adjacent freed chunks, resulting in larger free
971        chunks and less fragmentation.
972 
973     * `top': The top-most available chunk (i.e., the one bordering the
974        end of available memory) is treated specially. It is never
975        included in any bin, is used only if no other chunk is
976        available, and is released back to the system if it is very
977        large (see M_TRIM_THRESHOLD).
978 
979     * `last_remainder': A bin holding only the remainder of the
980        most recently split (non-top) chunk. This bin is checked
981        before other non-fitting chunks, so as to provide better
982        locality for runs of sequentially allocated chunks.
983 
984     *  Implicitly, through the host system's memory mapping tables.
985        If supported, requests greater than a threshold are usually
986        serviced via calls to mmap, and then later released via munmap.
987 
988 */
989 
990 
991 
992 
993 
994 
995 /*  sizes, alignments */
996 
997 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
998 #ifndef MALLOC_ALIGNMENT
999 #define MALLOC_ALIGN           8
1000 #define MALLOC_ALIGNMENT       (SIZE_SZ < 4 ? 8 : (SIZE_SZ + SIZE_SZ))
1001 #else
1002 #define MALLOC_ALIGN           MALLOC_ALIGNMENT
1003 #endif
1004 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
1005 #define MINSIZE                (sizeof(struct malloc_chunk))
1006 
1007 /* conversion from malloc headers to user pointers, and back */
1008 
1009 #define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
1010 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1011 
1012 /* pad request bytes into a usable size */
1013 
1014 #define request2size(req) \
1015  (((unsigned long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
1016   (unsigned long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
1017    (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
1018 
1019 /* Check if m has acceptable alignment */
1020 
1021 #define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1022 
1023 
1024 
1025 
1026 /*
1027   Physical chunk operations
1028 */
1029 
1030 
1031 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1032 
1033 #define PREV_INUSE 0x1
1034 
1035 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1036 
1037 #define IS_MMAPPED 0x2
1038 
1039 /* Bits to mask off when extracting size */
1040 
1041 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1042 
1043 
1044 /* Ptr to next physical malloc_chunk. */
1045 
1046 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1047 
1048 /* Ptr to previous physical malloc_chunk */
1049 
1050 #define prev_chunk(p)\
1051    ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1052 
1053 
1054 /* Treat space at ptr + offset as a chunk */
1055 
1056 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
1057 
1058 
1059 
1060 
1061 /*
1062   Dealing with use bits
1063 */
1064 
1065 /* extract p's inuse bit */
1066 
1067 #define inuse(p)\
1068 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1069 
1070 /* extract inuse bit of previous chunk */
1071 
1072 #define prev_inuse(p)  ((p)->size & PREV_INUSE)
1073 
1074 /* check for mmap()'ed chunk */
1075 
1076 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1077 
1078 /* set/clear chunk as in use without otherwise disturbing */
1079 
1080 #define set_inuse(p)\
1081 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1082 
1083 #define clear_inuse(p)\
1084 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1085 
1086 /* check/set/clear inuse bits in known places */
1087 
1088 #define inuse_bit_at_offset(p, s)\
1089  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1090 
1091 #define set_inuse_bit_at_offset(p, s)\
1092  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1093 
1094 #define clear_inuse_bit_at_offset(p, s)\
1095  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1096 
1097 
1098 
1099 
1100 /*
1101   Dealing with size fields
1102 */
1103 
1104 /* Get size, ignoring use bits */
1105 
1106 #define chunksize(p)          ((p)->size & ~(SIZE_BITS))
1107 
1108 /* Set size at head, without disturbing its use bit */
1109 
1110 #define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1111 
1112 /* Set size/use ignoring previous bits in header */
1113 
1114 #define set_head(p, s)        ((p)->size = (s))
1115 
1116 /* Set size at footer (only when chunk is not in use) */
1117 
1118 #define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1119 
1120 
1121 
1122 
1123 
1124 /*
1125    Bins
1126 
1127     The bins, `av_' are an array of pairs of pointers serving as the
1128     heads of (initially empty) doubly-linked lists of chunks, laid out
1129     in a way so that each pair can be treated as if it were in a
1130     malloc_chunk. (This way, the fd/bk offsets for linking bin heads
1131     and chunks are the same).
1132 
1133     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1134     8 bytes apart. Larger bins are approximately logarithmically
1135     spaced. (See the table below.) The `av_' array is never mentioned
1136     directly in the code, but instead via bin access macros.
1137 
1138     Bin layout:
1139 
1140     64 bins of size       8
1141     32 bins of size      64
1142     16 bins of size     512
1143      8 bins of size    4096
1144      4 bins of size   32768
1145      2 bins of size  262144
1146      1 bin  of size what's left
1147 
1148     There is actually a little bit of slop in the numbers in bin_index
1149     for the sake of speed. This makes no difference elsewhere.
1150 
1151     The special chunks `top' and `last_remainder' get their own bins,
1152     (this is implemented via yet more trickery with the av_ array),
1153     although `top' is never properly linked to its bin since it is
1154     always handled specially.
1155 
1156 */
1157 
1158 #ifdef SEPARATE_OBJECTS
1159 #define av_ malloc_av_
1160 #endif
1161 
1162 #define NAV             128   /* number of bins */
1163 
1164 typedef struct malloc_chunk* mbinptr;
1165 
1166 /* access macros */
1167 
1168 #define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
1169 #define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
1170 #define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1171 
1172 /*
1173    The first 2 bins are never indexed. The corresponding av_ cells are instead
1174    used for bookkeeping. This is not to save space, but to simplify
1175    indexing, maintain locality, and avoid some initialization tests.
1176 */
1177 
1178 #define top            (bin_at(0)->fd)   /* The topmost chunk */
1179 #define last_remainder (bin_at(1))       /* remainder from last split */
1180 
1181 
1182 /*
1183    Because top initially points to its own bin with initial
1184    zero size, thus forcing extension on the first malloc request,
1185    we avoid having any special code in malloc to check whether
1186    it even exists yet. But we still need to in malloc_extend_top.
1187 */
1188 
1189 #define initial_top    ((mchunkptr)(bin_at(0)))
1190 
1191 /* Helper macro to initialize bins */
1192 
1193 #define IAV(i)  bin_at(i), bin_at(i)
1194 
1195 #ifdef DEFINE_MALLOC
1196 STATIC mbinptr av_[NAV * 2 + 2] = {
1197  0, 0,
1198  IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
1199  IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
1200  IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
1201  IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
1202  IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
1203  IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
1204  IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
1205  IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
1206  IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
1207  IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
1208  IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
1209  IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
1210  IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
1211  IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
1212  IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
1213  IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
1214 };
1215 #else
1216 extern mbinptr av_[NAV * 2 + 2];
1217 #endif
1218 
1219 
1220 
1221 /* field-extraction macros */
1222 
1223 #define first(b) ((b)->fd)
1224 #define last(b)  ((b)->bk)
1225 
1226 /*
1227   Indexing into bins
1228 */
1229 
1230 #define bin_index(sz)                                                          \
1231 (((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
1232  ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
1233  ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
1234  ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
1235  ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
1236  ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
1237                                           126)
1238 /*
1239   bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
1240   identically sized chunks. This is exploited in malloc.
1241 */
1242 
1243 #define MAX_SMALLBIN_SIZE   512
1244 #define SMALLBIN_WIDTH        8
1245 #define SMALLBIN_WIDTH_BITS   3
1246 #define MAX_SMALLBIN        (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
1247 
1248 #define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)
1249 
1250 /*
1251    Requests are `small' if both the corresponding and the next bin are small
1252 */
1253 
1254 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
1255 
1256 
1257 
1258 /*
1259     To help compensate for the large number of bins, a one-level index
1260     structure is used for bin-by-bin searching.  `binblocks' is a
1261     one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1262     have any (possibly) non-empty bins, so they can be skipped over
1263     all at once during during traversals. The bits are NOT always
1264     cleared as soon as all bins in a block are empty, but instead only
1265     when all are noticed to be empty during traversal in malloc.
1266 */
1267 
1268 #define BINBLOCKWIDTH     4   /* bins per block */
1269 
1270 #define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
1271 
1272 /* bin<->block macros */
1273 
1274 #define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
1275 #define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
1276 #define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
1277 
1278 
1279 
1280 
1281 
1282 /*  Other static bookkeeping data */
1283 
1284 #ifdef SEPARATE_OBJECTS
1285 #define trim_threshold		malloc_trim_threshold
1286 #define top_pad			malloc_top_pad
1287 #define n_mmaps_max		malloc_n_mmaps_max
1288 #define mmap_threshold		malloc_mmap_threshold
1289 #define sbrk_base		malloc_sbrk_base
1290 #define max_sbrked_mem		malloc_max_sbrked_mem
1291 #define max_total_mem		malloc_max_total_mem
1292 #define current_mallinfo	malloc_current_mallinfo
1293 #define n_mmaps			malloc_n_mmaps
1294 #define max_n_mmaps		malloc_max_n_mmaps
1295 #define mmapped_mem		malloc_mmapped_mem
1296 #define max_mmapped_mem		malloc_max_mmapped_mem
1297 #endif
1298 
1299 /* variables holding tunable values */
1300 
1301 #ifdef DEFINE_MALLOC
1302 
1303 STATIC unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
1304 STATIC unsigned long top_pad          = DEFAULT_TOP_PAD;
1305 #if HAVE_MMAP
1306 STATIC unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
1307 STATIC unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
1308 #endif
1309 
1310 /* The first value returned from sbrk */
1311 STATIC char* sbrk_base = (char*)(-1);
1312 
1313 /* The maximum memory obtained from system via sbrk */
1314 STATIC unsigned long max_sbrked_mem = 0;
1315 
1316 /* The maximum via either sbrk or mmap */
1317 STATIC unsigned long max_total_mem = 0;
1318 
1319 /* internal working copy of mallinfo */
1320 STATIC struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1321 
1322 #if HAVE_MMAP
1323 
1324 /* Tracking mmaps */
1325 
1326 STATIC unsigned int n_mmaps = 0;
1327 STATIC unsigned int max_n_mmaps = 0;
1328 STATIC unsigned long mmapped_mem = 0;
1329 STATIC unsigned long max_mmapped_mem = 0;
1330 
1331 #endif
1332 
1333 #else /* ! DEFINE_MALLOC */
1334 
1335 extern unsigned long trim_threshold;
1336 extern unsigned long top_pad;
1337 #if HAVE_MMAP
1338 extern unsigned int  n_mmaps_max;
1339 extern unsigned long mmap_threshold;
1340 #endif
1341 extern char* sbrk_base;
1342 extern unsigned long max_sbrked_mem;
1343 extern unsigned long max_total_mem;
1344 extern struct mallinfo current_mallinfo;
1345 #if HAVE_MMAP
1346 extern unsigned int n_mmaps;
1347 extern unsigned int max_n_mmaps;
1348 extern unsigned long mmapped_mem;
1349 extern unsigned long max_mmapped_mem;
1350 #endif
1351 
1352 #endif /* ! DEFINE_MALLOC */
1353 
1354 /* The total memory obtained from system via sbrk */
1355 #define sbrked_mem  (current_mallinfo.arena)
1356 
1357 
1358 
1359 /*
1360   Debugging support
1361 */
1362 
1363 #if DEBUG
1364 
1365 
1366 /*
1367   These routines make a number of assertions about the states
1368   of data structures that should be true at all times. If any
1369   are not true, it's very likely that a user program has somehow
1370   trashed memory. (It's also possible that there is a coding error
1371   in malloc. In which case, please report it!)
1372 */
1373 
do_check_chunk(mchunkptr p)1374 static void do_check_chunk(mchunkptr p)
1375 {
1376   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1377 
1378   /* No checkable chunk is mmapped */
1379   assert(!chunk_is_mmapped(p));
1380 
1381   /* Check for legal address ... */
1382   assert((char*)p >= sbrk_base);
1383   if (p != top)
1384     assert((char*)p + sz <= (char*)top);
1385   else
1386     assert((char*)p + sz <= sbrk_base + sbrked_mem);
1387 
1388 }
1389 
1390 
do_check_free_chunk(mchunkptr p)1391 static void do_check_free_chunk(mchunkptr p)
1392 {
1393   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1394   mchunkptr next = chunk_at_offset(p, sz);
1395 
1396   do_check_chunk(p);
1397 
1398   /* Check whether it claims to be free ... */
1399   assert(!inuse(p));
1400 
1401   /* Unless a special marker, must have OK fields */
1402   if ((long)sz >= (long)MINSIZE)
1403   {
1404     assert((sz & MALLOC_ALIGN_MASK) == 0);
1405     assert(aligned_OK(chunk2mem(p)));
1406     /* ... matching footer field */
1407     assert(next->prev_size == sz);
1408     /* ... and is fully consolidated */
1409     assert(prev_inuse(p));
1410     assert (next == top || inuse(next));
1411 
1412     /* ... and has minimally sane links */
1413     assert(p->fd->bk == p);
1414     assert(p->bk->fd == p);
1415   }
1416   else /* markers are always of size SIZE_SZ */
1417     assert(sz == SIZE_SZ);
1418 }
1419 
do_check_inuse_chunk(mchunkptr p)1420 static void do_check_inuse_chunk(mchunkptr p)
1421 {
1422   mchunkptr next = next_chunk(p);
1423   do_check_chunk(p);
1424 
1425   /* Check whether it claims to be in use ... */
1426   assert(inuse(p));
1427 
1428   /* ... and is surrounded by OK chunks.
1429     Since more things can be checked with free chunks than inuse ones,
1430     if an inuse chunk borders them and debug is on, it's worth doing them.
1431   */
1432   if (!prev_inuse(p))
1433   {
1434     mchunkptr prv = prev_chunk(p);
1435     assert(next_chunk(prv) == p);
1436     do_check_free_chunk(prv);
1437   }
1438   if (next == top)
1439   {
1440     assert(prev_inuse(next));
1441     assert(chunksize(next) >= MINSIZE);
1442   }
1443   else if (!inuse(next))
1444     do_check_free_chunk(next);
1445 
1446 }
1447 
do_check_malloced_chunk(mchunkptr p,INTERNAL_SIZE_T s)1448 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
1449 {
1450   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1451   long room = long_sub_size_t(sz, s);
1452 
1453   do_check_inuse_chunk(p);
1454 
1455   /* Legal size ... */
1456   assert((long)sz >= (long)MINSIZE);
1457   assert((sz & MALLOC_ALIGN_MASK) == 0);
1458   assert(room >= 0);
1459   assert(room < (long)MINSIZE);
1460 
1461   /* ... and alignment */
1462   assert(aligned_OK(chunk2mem(p)));
1463 
1464 
1465   /* ... and was allocated at front of an available chunk */
1466   assert(prev_inuse(p));
1467 
1468 }
1469 
1470 
1471 #define check_free_chunk(P)  do_check_free_chunk(P)
1472 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
1473 #define check_chunk(P) do_check_chunk(P)
1474 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
1475 #else
1476 #define check_free_chunk(P)
1477 #define check_inuse_chunk(P)
1478 #define check_chunk(P)
1479 #define check_malloced_chunk(P,N)
1480 #endif
1481 
1482 
1483 
1484 /*
1485   Macro-based internal utilities
1486 */
1487 
1488 
1489 /*
1490   Linking chunks in bin lists.
1491   Call these only with variables, not arbitrary expressions, as arguments.
1492 */
1493 
1494 /*
1495   Place chunk p of size s in its bin, in size order,
1496   putting it ahead of others of same size.
1497 */
1498 
1499 
1500 #define frontlink(P, S, IDX, BK, FD)                                          \
1501 {                                                                             \
1502   if (S < MAX_SMALLBIN_SIZE)                                                  \
1503   {                                                                           \
1504     IDX = smallbin_index(S);                                                  \
1505     mark_binblock(IDX);                                                       \
1506     BK = bin_at(IDX);                                                         \
1507     FD = BK->fd;                                                              \
1508     P->bk = BK;                                                               \
1509     P->fd = FD;                                                               \
1510     FD->bk = BK->fd = P;                                                      \
1511   }                                                                           \
1512   else                                                                        \
1513   {                                                                           \
1514     IDX = bin_index(S);                                                       \
1515     BK = bin_at(IDX);                                                         \
1516     FD = BK->fd;                                                              \
1517     if (FD == BK) mark_binblock(IDX);                                         \
1518     else                                                                      \
1519     {                                                                         \
1520       while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
1521       BK = FD->bk;                                                            \
1522     }                                                                         \
1523     P->bk = BK;                                                               \
1524     P->fd = FD;                                                               \
1525     FD->bk = BK->fd = P;                                                      \
1526   }                                                                           \
1527 }
1528 
1529 
1530 /* take a chunk off a list */
1531 
1532 #define unlink(P, BK, FD)                                                     \
1533 {                                                                             \
1534   BK = P->bk;                                                                 \
1535   FD = P->fd;                                                                 \
1536   FD->bk = BK;                                                        \
1537   BK->fd = FD;                                                        \
1538 }                                                                             \
1539 
1540 /* Place p as the last remainder */
1541 
1542 #define link_last_remainder(P)                                                \
1543 {                                                                             \
1544   last_remainder->fd = last_remainder->bk =  P;                               \
1545   P->fd = P->bk = last_remainder;                                             \
1546 }
1547 
1548 /* Clear the last_remainder bin */
1549 
1550 #define clear_last_remainder \
1551   (last_remainder->fd = last_remainder->bk = last_remainder)
1552 
1553 
1554 
1555 
1556 
1557 
1558 /* Routines dealing with mmap(). */
1559 
1560 #if HAVE_MMAP
1561 
1562 #ifdef DEFINE_MALLOC
1563 
mmap_chunk(size_t size)1564 static mchunkptr mmap_chunk(size_t size)
1565 {
1566   size_t page_mask = malloc_getpagesize - 1;
1567   mchunkptr p;
1568 
1569 #ifndef MAP_ANONYMOUS
1570   static int fd = -1;
1571 #endif
1572 
1573   if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
1574 
1575   /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
1576    * there is no following chunk whose prev_size field could be used.
1577    */
1578   size = (size + SIZE_SZ + page_mask) & ~page_mask;
1579 
1580 #ifdef MAP_ANONYMOUS
1581   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
1582 		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1583 #else /* !MAP_ANONYMOUS */
1584   if (fd < 0)
1585   {
1586     fd = open("/dev/zero", O_RDWR);
1587     if(fd < 0) return 0;
1588   }
1589   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
1590 #endif
1591 
1592   if(p == (mchunkptr)-1) return 0;
1593 
1594   n_mmaps++;
1595   if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
1596 
1597   /* We demand that eight bytes into a page must be 8-byte aligned. */
1598   assert(aligned_OK(chunk2mem(p)));
1599 
1600   /* The offset to the start of the mmapped region is stored
1601    * in the prev_size field of the chunk; normally it is zero,
1602    * but that can be changed in memalign().
1603    */
1604   p->prev_size = 0;
1605   set_head(p, size|IS_MMAPPED);
1606 
1607   mmapped_mem += size;
1608   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1609     max_mmapped_mem = mmapped_mem;
1610   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1611     max_total_mem = mmapped_mem + sbrked_mem;
1612   return p;
1613 }
1614 
1615 #endif /* DEFINE_MALLOC */
1616 
1617 #ifdef SEPARATE_OBJECTS
1618 #define munmap_chunk malloc_munmap_chunk
1619 #endif
1620 
1621 #ifdef DEFINE_FREE
1622 
munmap_chunk(mchunkptr p)1623 STATIC void munmap_chunk(mchunkptr p)
1624 {
1625   INTERNAL_SIZE_T size = chunksize(p);
1626   int ret;
1627 
1628   assert (chunk_is_mmapped(p));
1629   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1630   assert((n_mmaps > 0));
1631   assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
1632 
1633   n_mmaps--;
1634   mmapped_mem -= (size + p->prev_size);
1635 
1636   ret = munmap((char *)p - p->prev_size, size + p->prev_size);
1637 
1638   /* munmap returns non-zero on failure */
1639   assert(ret == 0);
1640 }
1641 
1642 #else /* ! DEFINE_FREE */
1643 
1644 extern void munmap_chunk(mchunkptr);
1645 
1646 #endif /* ! DEFINE_FREE */
1647 
1648 #if HAVE_MREMAP
1649 
1650 #ifdef DEFINE_REALLOC
1651 
mremap_chunk(mchunkptr p,size_t new_size)1652 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
1653 {
1654   size_t page_mask = malloc_getpagesize - 1;
1655   INTERNAL_SIZE_T offset = p->prev_size;
1656   INTERNAL_SIZE_T size = chunksize(p);
1657   char *cp;
1658 
1659   assert (chunk_is_mmapped(p));
1660   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1661   assert((n_mmaps > 0));
1662   assert(((size + offset) & (malloc_getpagesize-1)) == 0);
1663 
1664   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
1665   new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
1666 
1667   cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
1668 
1669   if (cp == (char *)-1) return 0;
1670 
1671   p = (mchunkptr)(cp + offset);
1672 
1673   assert(aligned_OK(chunk2mem(p)));
1674 
1675   assert((p->prev_size == offset));
1676   set_head(p, (new_size - offset)|IS_MMAPPED);
1677 
1678   mmapped_mem -= size + offset;
1679   mmapped_mem += new_size;
1680   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1681     max_mmapped_mem = mmapped_mem;
1682   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1683     max_total_mem = mmapped_mem + sbrked_mem;
1684   return p;
1685 }
1686 
1687 #endif /* DEFINE_REALLOC */
1688 
1689 #endif /* HAVE_MREMAP */
1690 
1691 #endif /* HAVE_MMAP */
1692 
1693 
1694 
1695 
1696 #ifdef DEFINE_MALLOC
1697 
1698 /*
1699   Extend the top-most chunk by obtaining memory from system.
1700   Main interface to sbrk (but see also malloc_trim).
1701 */
1702 
malloc_extend_top(INTERNAL_SIZE_T nb)1703 static void malloc_extend_top(INTERNAL_SIZE_T nb)
1704 {
1705   char*     brk;                  /* return value from sbrk */
1706   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
1707   INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
1708   int correction_failed = 0;      /* whether we should relax the assertion */
1709   char*     new_brk;              /* return of 2nd sbrk call */
1710   INTERNAL_SIZE_T top_size;       /* new size of top chunk */
1711 
1712   mchunkptr old_top     = top;  /* Record state of old top */
1713   INTERNAL_SIZE_T old_top_size = chunksize(old_top);
1714   char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
1715 
1716   /* Pad request with top_pad plus minimal overhead */
1717 
1718   INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
1719   unsigned long pagesz    = malloc_getpagesize;
1720 
1721   /* If not the first time through, round to preserve page boundary */
1722   /* Otherwise, we need to correct to a page size below anyway. */
1723   /* (We also correct below if an intervening foreign sbrk call.) */
1724 
1725   if (sbrk_base != (char*)(-1))
1726     sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1727 
1728   brk = (char*)(MORECORE (sbrk_size));
1729 
1730   /* Fail if sbrk failed or if a foreign sbrk call killed our space */
1731   if (brk == (char*)(MORECORE_FAILURE) ||
1732       (brk < old_end && old_top != initial_top))
1733     return;
1734 
1735   sbrked_mem += sbrk_size;
1736 
1737   if (brk == old_end /* can just add bytes to current top, unless
1738 			previous correction failed */
1739       && ((uintptr_t)old_end & (pagesz - 1)) == 0)
1740   {
1741     top_size = sbrk_size + old_top_size;
1742     set_head(top, top_size | PREV_INUSE);
1743   }
1744   else
1745   {
1746     if (sbrk_base == (char*)(-1))  /* First time through. Record base */
1747       sbrk_base = brk;
1748     else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
1749       sbrked_mem += brk - (char*)old_end;
1750 
1751     /* Guarantee alignment of first new chunk made from this space */
1752     front_misalign = (uintptr_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1753     if (front_misalign > 0)
1754     {
1755       correction = (MALLOC_ALIGNMENT) - front_misalign;
1756       brk += correction;
1757     }
1758     else
1759       correction = 0;
1760 
1761     /* Guarantee the next brk will be at a page boundary */
1762     correction += pagesz - ((uintptr_t)(brk + sbrk_size) & (pagesz - 1));
1763 
1764     /* To guarantee page boundary, correction should be less than pagesz */
1765     correction &= (pagesz - 1);
1766 
1767     /* Allocate correction */
1768     new_brk = (char*)(MORECORE (correction));
1769     if (new_brk == (char*)(MORECORE_FAILURE))
1770       {
1771 	correction = 0;
1772 	correction_failed = 1;
1773 	new_brk = brk + sbrk_size;
1774 	if (front_misalign > 0)
1775 	  new_brk -= (MALLOC_ALIGNMENT) - front_misalign;
1776       }
1777 
1778     sbrked_mem += correction;
1779 
1780     top = (mchunkptr)brk;
1781     top_size = new_brk - brk + correction;
1782     set_head(top, top_size | PREV_INUSE);
1783 
1784     if (old_top != initial_top)
1785     {
1786 
1787       /* There must have been an intervening foreign sbrk call. */
1788       /* A double fencepost is necessary to prevent consolidation */
1789 
1790       /* If not enough space to do this, then user did something very wrong */
1791       if (old_top_size < MINSIZE)
1792       {
1793         set_head(top, PREV_INUSE); /* will force null return from malloc */
1794         return;
1795       }
1796 
1797       /* Also keep size a multiple of MALLOC_ALIGNMENT */
1798       old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1799       set_head_size(old_top, old_top_size);
1800       chunk_at_offset(old_top, old_top_size          )->size =
1801         SIZE_SZ|PREV_INUSE;
1802       chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
1803         SIZE_SZ|PREV_INUSE;
1804       /* If possible, release the rest. */
1805       if (old_top_size >= MINSIZE)
1806         __malloc_free(chunk2mem(old_top));
1807     }
1808   }
1809 
1810   if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
1811     max_sbrked_mem = sbrked_mem;
1812 #if HAVE_MMAP
1813   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1814     max_total_mem = mmapped_mem + sbrked_mem;
1815 #else
1816   if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem)
1817     max_total_mem = sbrked_mem;
1818 #endif
1819 
1820   /* We always land on a page boundary */
1821   assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0
1822 	 || correction_failed);
1823   (void) correction_failed;
1824 }
1825 
1826 #endif /* DEFINE_MALLOC */
1827 
1828 
1829 /* Main public routines */
1830 
1831 #ifdef DEFINE_MALLOC
1832 
1833 /*
1834   Malloc Algorthim:
1835 
1836     The requested size is first converted into a usable form, `nb'.
1837     This currently means to add 4 bytes overhead plus possibly more to
1838     obtain 8-byte alignment and/or to obtain a size of at least
1839     MINSIZE (currently 16 bytes), the smallest allocatable size.
1840     (All fits are considered `exact' if they are within MINSIZE bytes.)
1841 
1842     From there, the first successful of the following steps is taken:
1843 
1844       1. The bin corresponding to the request size is scanned, and if
1845          a chunk of exactly the right size is found, it is taken.
1846 
1847       2. The most recently remaindered chunk is used if it is big
1848          enough.  This is a form of (roving) first fit, used only in
1849          the absence of exact fits. Runs of consecutive requests use
1850          the remainder of the chunk used for the previous such request
1851          whenever possible. This limited use of a first-fit style
1852          allocation strategy tends to give contiguous chunks
1853          coextensive lifetimes, which improves locality and can reduce
1854          fragmentation in the long run.
1855 
1856       3. Other bins are scanned in increasing size order, using a
1857          chunk big enough to fulfill the request, and splitting off
1858          any remainder.  This search is strictly by best-fit; i.e.,
1859          the smallest (with ties going to approximately the least
1860          recently used) chunk that fits is selected.
1861 
1862       4. If large enough, the chunk bordering the end of memory
1863          (`top') is split off. (This use of `top' is in accord with
1864          the best-fit search rule.  In effect, `top' is treated as
1865          larger (and thus less well fitting) than any other available
1866          chunk since it can be extended to be as large as necessary
1867          (up to system limitations).
1868 
1869       5. If the request size meets the mmap threshold and the
1870          system supports mmap, and there are few enough currently
1871          allocated mmapped regions, and a call to mmap succeeds,
1872          the request is allocated via direct memory mapping.
1873 
1874       6. Otherwise, the top of memory is extended by
1875          obtaining more space from the system (normally using sbrk,
1876          but definable to anything else via the MORECORE macro).
1877          Memory is gathered from the system (in system page-sized
1878          units) in a way that allows chunks obtained across different
1879          sbrk calls to be consolidated, but does not require
1880          contiguous memory. Thus, it should be safe to intersperse
1881          mallocs with other sbrk calls.
1882 
1883 
1884       All allocations are made from the the `lowest' part of any found
1885       chunk. (The implementation invariant is that prev_inuse is
1886       always true of any allocated chunk; i.e., that each allocated
1887       chunk borders either a previously allocated and still in-use chunk,
1888       or the base of its memory arena.)
1889 
1890 */
1891 
mALLOc(size_t bytes)1892 void* mALLOc(size_t bytes)
1893 {
1894 #ifdef MALLOC_PROVIDED
1895 
1896   return malloc (bytes); // Make sure that the pointer returned by malloc is returned back.
1897 
1898 #else
1899 
1900   mchunkptr victim;                  /* inspected/selected chunk */
1901   INTERNAL_SIZE_T victim_size;       /* its size */
1902   int       idx;                     /* index for bin traversal */
1903   mbinptr   bin;                     /* associated bin */
1904   mchunkptr remainder;               /* remainder from a split */
1905   long      remainder_size;          /* its size */
1906   int       remainder_index;         /* its bin index */
1907   unsigned long block;               /* block traverser bit */
1908   int       startidx;                /* first bin of a traversed block */
1909   mchunkptr fwd;                     /* misc temp for linking */
1910   mchunkptr bck;                     /* misc temp for linking */
1911   mbinptr q;                         /* misc temp */
1912 
1913   INTERNAL_SIZE_T nb  = request2size(bytes);  /* padded request size; */
1914 
1915   /* Check for overflow and just fail, if so. */
1916   if (nb > INT_MAX || nb < bytes)
1917   {
1918     errno = ENOMEM;
1919     return 0;
1920   }
1921 
1922   MALLOC_LOCK;
1923 
1924   /* Check for exact match in a bin */
1925 
1926   if (is_small_request(nb))  /* Faster version for small requests */
1927   {
1928     idx = smallbin_index(nb);
1929 
1930     /* No traversal or size check necessary for small bins.  */
1931 
1932     q = bin_at(idx);
1933     victim = last(q);
1934 
1935 #if MALLOC_ALIGN != 16
1936     /* Also scan the next one, since it would have a remainder < MINSIZE */
1937     if (victim == q)
1938     {
1939       q = next_bin(q);
1940       victim = last(q);
1941     }
1942 #endif
1943     if (victim != q)
1944     {
1945       victim_size = chunksize(victim);
1946       unlink(victim, bck, fwd);
1947       set_inuse_bit_at_offset(victim, victim_size);
1948       check_malloced_chunk(victim, nb);
1949       MALLOC_UNLOCK;
1950       return chunk2mem(victim);
1951     }
1952 
1953     idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
1954 
1955   }
1956   else
1957   {
1958     idx = bin_index(nb);
1959     bin = bin_at(idx);
1960 
1961     for (victim = last(bin); victim != bin; victim = victim->bk)
1962     {
1963       victim_size = chunksize(victim);
1964       remainder_size = long_sub_size_t(victim_size, nb);
1965 
1966       if (remainder_size >= (long)MINSIZE) /* too big */
1967       {
1968         --idx; /* adjust to rescan below after checking last remainder */
1969         break;
1970       }
1971 
1972       else if (remainder_size >= 0) /* exact fit */
1973       {
1974         unlink(victim, bck, fwd);
1975         set_inuse_bit_at_offset(victim, victim_size);
1976         check_malloced_chunk(victim, nb);
1977 	MALLOC_UNLOCK;
1978         return chunk2mem(victim);
1979       }
1980     }
1981 
1982     ++idx;
1983 
1984   }
1985 
1986   /* Try to use the last split-off remainder */
1987 
1988   if ( (victim = last_remainder->fd) != last_remainder)
1989   {
1990     victim_size = chunksize(victim);
1991     remainder_size = long_sub_size_t(victim_size, nb);
1992 
1993     if (remainder_size >= (long)MINSIZE) /* re-split */
1994     {
1995       remainder = chunk_at_offset(victim, nb);
1996       set_head(victim, nb | PREV_INUSE);
1997       link_last_remainder(remainder);
1998       set_head(remainder, remainder_size | PREV_INUSE);
1999       set_foot(remainder, remainder_size);
2000       check_malloced_chunk(victim, nb);
2001       MALLOC_UNLOCK;
2002       return chunk2mem(victim);
2003     }
2004 
2005     clear_last_remainder;
2006 
2007     if (remainder_size >= 0)  /* exhaust */
2008     {
2009       set_inuse_bit_at_offset(victim, victim_size);
2010       check_malloced_chunk(victim, nb);
2011       MALLOC_UNLOCK;
2012       return chunk2mem(victim);
2013     }
2014 
2015     /* Else place in bin */
2016 
2017     frontlink(victim, victim_size, remainder_index, bck, fwd);
2018   }
2019 
2020   /*
2021      If there are any possibly nonempty big-enough blocks,
2022      search for best fitting chunk by scanning bins in blockwidth units.
2023   */
2024 
2025   if ( (block = idx2binblock(idx)) <= binblocks)
2026   {
2027 
2028     /* Get to the first marked block */
2029 
2030     if ( (block & binblocks) == 0)
2031     {
2032       /* force to an even block boundary */
2033       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
2034       block <<= 1;
2035       while ((block & binblocks) == 0)
2036       {
2037         idx += BINBLOCKWIDTH;
2038         block <<= 1;
2039       }
2040     }
2041 
2042     /* For each possibly nonempty block ... */
2043     for (;;)
2044     {
2045       startidx = idx;          /* (track incomplete blocks) */
2046       q = bin = bin_at(idx);
2047 
2048       /* For each bin in this block ... */
2049       do
2050       {
2051         /* Find and use first big enough chunk ... */
2052 
2053         for (victim = last(bin); victim != bin; victim = victim->bk)
2054         {
2055           victim_size = chunksize(victim);
2056           remainder_size = long_sub_size_t(victim_size, nb);
2057 
2058           if (remainder_size >= (long)MINSIZE) /* split */
2059           {
2060             remainder = chunk_at_offset(victim, nb);
2061             set_head(victim, nb | PREV_INUSE);
2062             unlink(victim, bck, fwd);
2063             link_last_remainder(remainder);
2064             set_head(remainder, remainder_size | PREV_INUSE);
2065             set_foot(remainder, remainder_size);
2066             check_malloced_chunk(victim, nb);
2067 	    MALLOC_UNLOCK;
2068             return chunk2mem(victim);
2069           }
2070 
2071           else if (remainder_size >= 0)  /* take */
2072           {
2073             set_inuse_bit_at_offset(victim, victim_size);
2074             unlink(victim, bck, fwd);
2075             check_malloced_chunk(victim, nb);
2076 	    MALLOC_UNLOCK;
2077             return chunk2mem(victim);
2078           }
2079 
2080         }
2081 
2082        bin = next_bin(bin);
2083 
2084 #if MALLOC_ALIGN == 16
2085        if (idx < MAX_SMALLBIN)
2086          {
2087            bin = next_bin(bin);
2088            ++idx;
2089          }
2090 #endif
2091       } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
2092 
2093       /* Clear out the block bit. */
2094 
2095       do   /* Possibly backtrack to try to clear a partial block */
2096       {
2097         if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
2098         {
2099           binblocks &= ~block;
2100           break;
2101         }
2102         --startidx;
2103        q = prev_bin(q);
2104       } while (first(q) == q);
2105 
2106       /* Get to the next possibly nonempty block */
2107 
2108       if ( (block <<= 1) <= binblocks && (block != 0) )
2109       {
2110         while ((block & binblocks) == 0)
2111         {
2112           idx += BINBLOCKWIDTH;
2113           block <<= 1;
2114         }
2115       }
2116       else
2117         break;
2118     }
2119   }
2120 
2121 
2122   /* Try to use top chunk */
2123 
2124   /* Require that there be a remainder, ensuring top always exists  */
2125   remainder_size = long_sub_size_t(chunksize(top), nb);
2126   if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2127   {
2128 
2129 #if HAVE_MMAP
2130     /* If big and would otherwise need to extend, try to use mmap instead */
2131     if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
2132         (victim = mmap_chunk(nb)) != 0)
2133     {
2134       MALLOC_UNLOCK;
2135       return chunk2mem(victim);
2136     }
2137 #endif
2138 
2139     /* Try to extend */
2140     malloc_extend_top(nb);
2141     remainder_size = long_sub_size_t(chunksize(top), nb);
2142     if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2143     {
2144       MALLOC_UNLOCK;
2145       return 0; /* propagate failure */
2146     }
2147   }
2148 
2149   victim = top;
2150   set_head(victim, nb | PREV_INUSE);
2151   top = chunk_at_offset(victim, nb);
2152   set_head(top, remainder_size | PREV_INUSE);
2153   check_malloced_chunk(victim, nb);
2154   MALLOC_UNLOCK;
2155   return chunk2mem(victim);
2156 
2157 #endif /* MALLOC_PROVIDED */
2158 }
2159 
2160 #endif /* DEFINE_MALLOC */
2161 
2162 #ifdef DEFINE_FREE
2163 
2164 /*
2165 
2166   free() algorithm :
2167 
2168     cases:
2169 
2170        1. free(0) has no effect.
2171 
2172        2. If the chunk was allocated via mmap, it is release via munmap().
2173 
2174        3. If a returned chunk borders the current high end of memory,
2175           it is consolidated into the top, and if the total unused
2176           topmost memory exceeds the trim threshold, malloc_trim is
2177           called.
2178 
2179        4. Other chunks are consolidated as they arrive, and
2180           placed in corresponding bins. (This includes the case of
2181           consolidating with the current `last_remainder').
2182 
2183 */
2184 
2185 
fREe(void * mem)2186 void fREe(void* mem)
2187 {
2188 #ifdef MALLOC_PROVIDED
2189 
2190   free (mem);
2191 
2192 #else
2193 
2194   mchunkptr p;         /* chunk corresponding to mem */
2195   INTERNAL_SIZE_T hd;  /* its head field */
2196   INTERNAL_SIZE_T sz;  /* its size */
2197   int       idx;       /* its bin index */
2198   mchunkptr next;      /* next contiguous chunk */
2199   INTERNAL_SIZE_T nextsz; /* its size */
2200   INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
2201   mchunkptr bck;       /* misc temp for linking */
2202   mchunkptr fwd;       /* misc temp for linking */
2203   int       islr;      /* track whether merging with last_remainder */
2204 
2205   if (mem == 0)                              /* free(0) has no effect */
2206     return;
2207 
2208   MALLOC_LOCK;
2209 
2210   p = mem2chunk(mem);
2211   hd = p->size;
2212 
2213 #if HAVE_MMAP
2214   if (hd & IS_MMAPPED)                       /* release mmapped memory. */
2215   {
2216     munmap_chunk(p);
2217     MALLOC_UNLOCK;
2218     return;
2219   }
2220 #endif
2221 
2222   check_inuse_chunk(p);
2223 
2224   sz = hd & ~PREV_INUSE;
2225   next = chunk_at_offset(p, sz);
2226   nextsz = chunksize(next);
2227 
2228   if (next == top)                            /* merge with top */
2229   {
2230     sz += nextsz;
2231 
2232     if (!(hd & PREV_INUSE))                    /* consolidate backward */
2233     {
2234       prevsz = p->prev_size;
2235       p = chunk_at_offset(p, -prevsz);
2236       sz += prevsz;
2237       unlink(p, bck, fwd);
2238     }
2239 
2240     set_head(p, sz | PREV_INUSE);
2241     top = p;
2242     if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
2243       malloc_trim(top_pad);
2244     MALLOC_UNLOCK;
2245     return;
2246   }
2247 
2248   set_head(next, nextsz);                    /* clear inuse bit */
2249 
2250   islr = 0;
2251 
2252   if (!(hd & PREV_INUSE))                    /* consolidate backward */
2253   {
2254     prevsz = p->prev_size;
2255     p = chunk_at_offset(p, -prevsz);
2256     sz += prevsz;
2257 
2258     if (p->fd == last_remainder)             /* keep as last_remainder */
2259       islr = 1;
2260     else
2261       unlink(p, bck, fwd);
2262   }
2263 
2264   if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
2265   {
2266     sz += nextsz;
2267 
2268     if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
2269     {
2270       islr = 1;
2271       link_last_remainder(p);
2272     }
2273     else
2274       unlink(next, bck, fwd);
2275   }
2276 
2277 
2278   set_head(p, sz | PREV_INUSE);
2279   set_foot(p, sz);
2280   if (!islr)
2281     frontlink(p, sz, idx, bck, fwd);
2282 
2283   MALLOC_UNLOCK;
2284 
2285 #endif /* MALLOC_PROVIDED */
2286 }
2287 #ifdef _HAVE_ALIAS_ATTRIBUTE
2288 #pragma GCC diagnostic push
2289 #ifndef __clang__
2290 #pragma GCC diagnostic ignored "-Wmissing-attributes"
2291 #endif
2292 __strong_reference(free, __malloc_free);
2293 #pragma GCC diagnostic pop
2294 #endif
2295 #endif /* DEFINE_FREE */
2296 
2297 #ifdef DEFINE_REALLOC
2298 
2299 /*
2300 
2301   Realloc algorithm:
2302 
2303     Chunks that were obtained via mmap cannot be extended or shrunk
2304     unless HAVE_MREMAP is defined, in which case mremap is used.
2305     Otherwise, if their reallocation is for additional space, they are
2306     copied.  If for less, they are just left alone.
2307 
2308     Otherwise, if the reallocation is for additional space, and the
2309     chunk can be extended, it is, else a malloc-copy-free sequence is
2310     taken.  There are several different ways that a chunk could be
2311     extended. All are tried:
2312 
2313        * Extending forward into following adjacent free chunk.
2314        * Shifting backwards, joining preceding adjacent space
2315        * Both shifting backwards and extending forward.
2316        * Extending into newly sbrked space
2317 
2318     Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
2319     size argument of zero (re)allocates a minimum-sized chunk.
2320 
2321     If the reallocation is for less space, and the new request is for
2322     a `small' (<512 bytes) size, then the newly unused space is lopped
2323     off and freed.
2324 
2325     The old unix realloc convention of allowing the last-free'd chunk
2326     to be used as an argument to realloc is no longer supported.
2327     I don't know of any programs still relying on this feature,
2328     and allowing it would also allow too many other incorrect
2329     usages of realloc to be sensible.
2330 
2331 
2332 */
2333 
2334 
rEALLOc(void * oldmem,size_t bytes)2335 void* rEALLOc(void* oldmem, size_t bytes)
2336 {
2337 #ifdef MALLOC_PROVIDED
2338 
2339   realloc (oldmem, bytes);
2340 
2341 #else
2342 
2343   INTERNAL_SIZE_T    nb;      /* padded request size */
2344 
2345   mchunkptr oldp;             /* chunk corresponding to oldmem */
2346   INTERNAL_SIZE_T    oldsize; /* its size */
2347 
2348   mchunkptr newp;             /* chunk to return */
2349   INTERNAL_SIZE_T    newsize; /* its size */
2350   void*   newmem;           /* corresponding user mem */
2351 
2352   mchunkptr next;             /* next contiguous chunk after oldp */
2353   INTERNAL_SIZE_T  nextsize;  /* its size */
2354 
2355   mchunkptr prev;             /* previous contiguous chunk before oldp */
2356   INTERNAL_SIZE_T  prevsize;  /* its size */
2357 
2358   mchunkptr remainder;        /* holds split off extra space from newp */
2359   INTERNAL_SIZE_T  remainder_size;   /* its size */
2360 
2361   mchunkptr bck;              /* misc temp for linking */
2362   mchunkptr fwd;              /* misc temp for linking */
2363 
2364 #ifdef REALLOC_ZERO_BYTES_FREES
2365   if (bytes == 0) { fREe(oldmem); return 0; }
2366 #endif
2367 
2368 
2369   /* realloc of null is supposed to be same as malloc */
2370   if (oldmem == 0) return mALLOc(bytes);
2371 
2372   MALLOC_LOCK;
2373 
2374   newp    = oldp    = mem2chunk(oldmem);
2375   newsize = oldsize = chunksize(oldp);
2376 
2377 
2378   nb = request2size(bytes);
2379 
2380   /* Check for overflow and just fail, if so. */
2381   if (nb > INT_MAX || nb < bytes)
2382   {
2383     errno = ENOMEM;
2384     return 0;
2385   }
2386 
2387 #if HAVE_MMAP
2388   if (chunk_is_mmapped(oldp))
2389   {
2390 #if HAVE_MREMAP
2391     newp = mremap_chunk(oldp, nb);
2392     if(newp)
2393     {
2394       MALLOC_UNLOCK;
2395       return chunk2mem(newp);
2396     }
2397 #endif
2398     /* Note the extra SIZE_SZ overhead. */
2399     if(oldsize - SIZE_SZ >= nb)
2400     {
2401       MALLOC_UNLOCK;
2402       return oldmem; /* do nothing */
2403     }
2404     /* Must alloc, copy, free. */
2405     newmem = mALLOc(bytes);
2406     if (newmem == 0)
2407     {
2408       MALLOC_UNLOCK;
2409       return 0; /* propagate failure */
2410     }
2411     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2412     munmap_chunk(oldp);
2413     MALLOC_UNLOCK;
2414     return newmem;
2415   }
2416 #endif
2417 
2418   check_inuse_chunk(oldp);
2419 
2420   if ((long)(oldsize) < (long)(nb))
2421   {
2422 
2423     /* Try expanding forward */
2424 
2425     next = chunk_at_offset(oldp, oldsize);
2426     if (next == top || !inuse(next))
2427     {
2428       nextsize = chunksize(next);
2429 
2430       /* Forward into top only if a remainder */
2431       if (next == top)
2432       {
2433         if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
2434         {
2435           newsize += nextsize;
2436           top = chunk_at_offset(oldp, nb);
2437           set_head(top, (newsize - nb) | PREV_INUSE);
2438           set_head_size(oldp, nb);
2439 	  MALLOC_UNLOCK;
2440           return chunk2mem(oldp);
2441         }
2442       }
2443 
2444       /* Forward into next chunk */
2445       else if (((long)(nextsize + newsize) >= (long)(nb)))
2446       {
2447         unlink(next, bck, fwd);
2448         newsize  += nextsize;
2449         goto split;
2450       }
2451     }
2452     else
2453     {
2454       next = 0;
2455       nextsize = 0;
2456     }
2457 
2458     /* Try shifting backwards. */
2459 
2460     if (!prev_inuse(oldp))
2461     {
2462       prev = prev_chunk(oldp);
2463       prevsize = chunksize(prev);
2464 
2465       /* try forward + backward first to save a later consolidation */
2466 
2467       if (next != 0)
2468       {
2469         /* into top */
2470         if (next == top)
2471         {
2472           if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
2473           {
2474             unlink(prev, bck, fwd);
2475             newp = prev;
2476             newsize += prevsize + nextsize;
2477             newmem = chunk2mem(newp);
2478             MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2479             top = chunk_at_offset(newp, nb);
2480             set_head(top, (newsize - nb) | PREV_INUSE);
2481             set_head_size(newp, nb);
2482 	    MALLOC_UNLOCK;
2483             return newmem;
2484           }
2485         }
2486 
2487         /* into next chunk */
2488         else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
2489         {
2490           unlink(next, bck, fwd);
2491           unlink(prev, bck, fwd);
2492           newp = prev;
2493           newsize += nextsize + prevsize;
2494           newmem = chunk2mem(newp);
2495           MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2496           goto split;
2497         }
2498       }
2499 
2500       /* backward only */
2501       if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
2502       {
2503         unlink(prev, bck, fwd);
2504         newp = prev;
2505         newsize += prevsize;
2506         newmem = chunk2mem(newp);
2507         MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2508         goto split;
2509       }
2510     }
2511 
2512     /* Must allocate */
2513 
2514     newmem = mALLOc (bytes);
2515 
2516     if (newmem == 0)  /* propagate failure */
2517     {
2518       MALLOC_UNLOCK;
2519       return 0;
2520     }
2521 
2522     /* Avoid copy if newp is next chunk after oldp. */
2523     /* (This can only happen when new chunk is sbrk'ed.) */
2524 
2525     if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
2526     {
2527       newsize += chunksize(newp);
2528       newp = oldp;
2529       goto split;
2530     }
2531 
2532     /* Otherwise copy, free, and exit */
2533     MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2534     __malloc_free(oldmem);
2535     MALLOC_UNLOCK;
2536     return newmem;
2537   }
2538 
2539 
2540  split:  /* split off extra room in old or expanded chunk */
2541 
2542   remainder_size = long_sub_size_t(newsize, nb);
2543 
2544   if (remainder_size >= (long)MINSIZE) /* split off remainder */
2545   {
2546     remainder = chunk_at_offset(newp, nb);
2547     set_head_size(newp, nb);
2548     set_head(remainder, remainder_size | PREV_INUSE);
2549     set_inuse_bit_at_offset(remainder, remainder_size);
2550     __malloc_free(chunk2mem(remainder)); /* let free() deal with it */
2551   }
2552   else
2553   {
2554     set_head_size(newp, newsize);
2555     set_inuse_bit_at_offset(newp, newsize);
2556   }
2557 
2558   check_inuse_chunk(newp);
2559   MALLOC_UNLOCK;
2560   return chunk2mem(newp);
2561 
2562 #endif /* MALLOC_PROVIDED */
2563 }
2564 
2565 #endif /* DEFINE_REALLOC */
2566 
2567 #ifdef DEFINE_MEMALIGN
2568 
2569 /*
2570 
2571   memalign algorithm:
2572 
2573     memalign requests more than enough space from malloc, finds a spot
2574     within that chunk that meets the alignment request, and then
2575     possibly frees the leading and trailing space.
2576 
2577     The alignment argument must be a power of two. This property is not
2578     checked by memalign, so misuse may result in random runtime errors.
2579 
2580     8-byte alignment is guaranteed by normal malloc calls, so don't
2581     bother calling memalign with an argument of 8 or less.
2582 
2583     Overreliance on memalign is a sure way to fragment space.
2584 
2585 */
2586 
2587 
mEMALIGn(size_t alignment,size_t bytes)2588 void* mEMALIGn(size_t alignment, size_t bytes)
2589 {
2590   INTERNAL_SIZE_T    nb;      /* padded  request size */
2591   char*     m;                /* memory returned by malloc call */
2592   mchunkptr p;                /* corresponding chunk */
2593   char*     brk;              /* alignment point within p */
2594   mchunkptr newp;             /* chunk to return */
2595   INTERNAL_SIZE_T  newsize;   /* its size */
2596   INTERNAL_SIZE_T  leadsize;  /* leading space befor alignment point */
2597   mchunkptr remainder;        /* spare room at end to split off */
2598   long      remainder_size;   /* its size */
2599 
2600   /* If need less alignment than we give anyway, just relay to malloc */
2601 
2602   if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
2603 
2604   /* Otherwise, ensure that it is at least a minimum chunk size */
2605 
2606   if (alignment <  MINSIZE) alignment = MINSIZE;
2607 
2608   /* Call malloc with worst case padding to hit alignment. */
2609 
2610   nb = request2size(bytes);
2611 
2612   /* Check for overflow. */
2613   if (nb > __SIZE_MAX__ - (alignment + MINSIZE) || nb < bytes)
2614   {
2615     errno = ENOMEM;
2616     return 0;
2617   }
2618 
2619   m  = (char*)(mALLOc(nb + alignment + MINSIZE));
2620 
2621   if (m == 0) return 0; /* propagate failure */
2622 
2623   MALLOC_LOCK;
2624 
2625   p = mem2chunk(m);
2626 
2627   if ((((uintptr_t)(m)) % alignment) == 0) /* aligned */
2628   {
2629 #if HAVE_MMAP
2630     if(chunk_is_mmapped(p))
2631     {
2632       MALLOC_UNLOCK;
2633       return chunk2mem(p); /* nothing more to do */
2634     }
2635 #endif
2636   }
2637   else /* misaligned */
2638   {
2639     /*
2640       Find an aligned spot inside chunk.
2641       Since we need to give back leading space in a chunk of at
2642       least MINSIZE, if the first calculation places us at
2643       a spot with less than MINSIZE leader, we can move to the
2644       next aligned spot -- we've allocated enough total room so that
2645       this is always possible.
2646     */
2647 
2648     brk = (char*)mem2chunk(((uintptr_t)(m + alignment - 1)) & -alignment);
2649     if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment;
2650 
2651     newp = (mchunkptr)brk;
2652     leadsize = brk - (char*)(p);
2653     newsize = chunksize(p) - leadsize;
2654 
2655 #if HAVE_MMAP
2656     if(chunk_is_mmapped(p))
2657     {
2658       newp->prev_size = p->prev_size + leadsize;
2659       set_head(newp, newsize|IS_MMAPPED);
2660       MALLOC_UNLOCK;
2661       return chunk2mem(newp);
2662     }
2663 #endif
2664 
2665     /* give back leader, use the rest */
2666 
2667     set_head(newp, newsize | PREV_INUSE);
2668     set_inuse_bit_at_offset(newp, newsize);
2669     set_head_size(p, leadsize);
2670     __malloc_free(chunk2mem(p));
2671     p = newp;
2672 
2673     assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
2674   }
2675 
2676   /* Also give back spare room at the end */
2677 
2678   remainder_size = long_sub_size_t(chunksize(p), nb);
2679 
2680   if (remainder_size >= (long)MINSIZE)
2681   {
2682     remainder = chunk_at_offset(p, nb);
2683     set_head(remainder, remainder_size | PREV_INUSE);
2684     set_head_size(p, nb);
2685     __malloc_free(chunk2mem(remainder));
2686   }
2687 
2688   check_inuse_chunk(p);
2689   MALLOC_UNLOCK;
2690   return chunk2mem(p);
2691 
2692 }
2693 
2694 #ifdef _HAVE_ALIAS_ATTRIBUTE
2695 __strong_reference(memalign, aligned_alloc);
2696 #endif
2697 #endif /* DEFINE_MEMALIGN */
2698 
2699 #ifdef DEFINE_VALLOC
2700 
2701 /*
2702     valloc just invokes memalign with alignment argument equal
2703     to the page size of the system (or as near to this as can
2704     be figured out from all the includes/defines above.)
2705 */
2706 
vALLOc(size_t bytes)2707 void* vALLOc(size_t bytes)
2708 {
2709   return mEMALIGn (malloc_getpagesize, bytes);
2710 }
2711 
2712 #endif /* DEFINE_VALLOC */
2713 
2714 #ifdef DEFINE_PVALLOC
2715 
2716 /*
2717   pvalloc just invokes valloc for the nearest pagesize
2718   that will accommodate request
2719 */
2720 
2721 
pvALLOc(size_t bytes)2722 void* pvALLOc(size_t bytes)
2723 {
2724   size_t pagesize = malloc_getpagesize;
2725   if (bytes > __SIZE_MAX__ - pagesize)
2726   {
2727     errno = ENOMEM;
2728     return 0;
2729   }
2730   return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2731 }
2732 
2733 #endif /* DEFINE_PVALLOC */
2734 
2735 #ifdef DEFINE_CALLOC
2736 #include "mul_overflow.h"
2737 /*
2738 
2739   calloc calls malloc, then zeroes out the allocated chunk.
2740 
2741 */
2742 
cALLOc(size_t n,size_t elem_size)2743 void* cALLOc(size_t n, size_t elem_size)
2744 {
2745   mchunkptr p;
2746   INTERNAL_SIZE_T csz;
2747 
2748   INTERNAL_SIZE_T sz;
2749 
2750 #if MORECORE_CLEARS
2751   mchunkptr oldtop;
2752   INTERNAL_SIZE_T oldtopsize;
2753 #endif
2754   void* mem;
2755 
2756   if (mul_overflow((INTERNAL_SIZE_T) n, (INTERNAL_SIZE_T) elem_size, &sz))
2757   {
2758     errno = ENOMEM;
2759     return 0;
2760   }
2761 
2762   /* check if expand_top called, in which case don't need to clear */
2763 #if MORECORE_CLEARS
2764   MALLOC_LOCK;
2765   oldtop = top;
2766   oldtopsize = chunksize(top);
2767 #endif
2768 
2769   mem = mALLOc (sz);
2770 
2771   if (mem == 0)
2772   {
2773 #if MORECORE_CLEARS
2774     MALLOC_UNLOCK;
2775 #endif
2776     return 0;
2777   }
2778   else
2779   {
2780     p = mem2chunk(mem);
2781 
2782     /* Two optional cases in which clearing not necessary */
2783 
2784 
2785 #if HAVE_MMAP
2786     if (chunk_is_mmapped(p))
2787     {
2788 #if MORECORE_CLEARS
2789       MALLOC_UNLOCK;
2790 #endif
2791       return mem;
2792     }
2793 #endif
2794 
2795     csz = chunksize(p);
2796 
2797 #if MORECORE_CLEARS
2798     if (p == oldtop && csz > oldtopsize)
2799     {
2800       /* clear only the bytes from non-freshly-sbrked memory */
2801       csz = oldtopsize;
2802     }
2803     MALLOC_UNLOCK;
2804 #endif
2805 
2806     MALLOC_ZERO(mem, csz - SIZE_SZ);
2807     return mem;
2808   }
2809 }
2810 
2811 #endif /* DEFINE_CALLOC */
2812 
2813 #if defined(DEFINE_CFREE) && !defined(__CYGWIN__)
2814 
2815 /*
2816 
2817   cfree just calls free. It is needed/defined on some systems
2818   that pair it with calloc, presumably for odd historical reasons.
2819 
2820 */
2821 
2822 #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
2823 #if !defined(_LIBC) || !defined(_REENT_ONLY)
cfree(void * mem)2824 void cfree(void *mem)
2825 {
2826   fREe(mem);
2827 }
2828 #endif
2829 #endif
2830 
2831 #endif /* DEFINE_CFREE */
2832 
2833 #ifdef DEFINE_FREE
2834 
2835 /*
2836 
2837     Malloc_trim gives memory back to the system (via negative
2838     arguments to sbrk) if there is unused memory at the `high' end of
2839     the malloc pool. You can call this after freeing large blocks of
2840     memory to potentially reduce the system-level memory requirements
2841     of a program. However, it cannot guarantee to reduce memory. Under
2842     some allocation patterns, some large free blocks of memory will be
2843     locked between two used chunks, so they cannot be given back to
2844     the system.
2845 
2846     The `pad' argument to malloc_trim represents the amount of free
2847     trailing space to leave untrimmed. If this argument is zero,
2848     only the minimum amount of memory to maintain internal data
2849     structures will be left (one page or less). Non-zero arguments
2850     can be supplied to maintain enough trailing space to service
2851     future expected allocations without having to re-obtain memory
2852     from the system.
2853 
2854     Malloc_trim returns 1 if it actually released any memory, else 0.
2855 
2856 */
2857 
malloc_trim(size_t pad)2858 int malloc_trim(size_t pad)
2859 {
2860   long  top_size;        /* Amount of top-most memory */
2861   long  extra;           /* Amount to release */
2862   char* current_brk;     /* address returned by pre-check sbrk call */
2863   char* new_brk;         /* address returned by negative sbrk call */
2864 
2865   unsigned long pagesz = malloc_getpagesize;
2866 
2867   MALLOC_LOCK;
2868 
2869   top_size = chunksize(top);
2870   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2871 
2872   if (extra < (long)pagesz)  /* Not enough memory to release */
2873   {
2874     MALLOC_UNLOCK;
2875     return 0;
2876   }
2877 
2878   else
2879   {
2880     /* Test to make sure no one else called sbrk */
2881     current_brk = (char*)(MORECORE (0));
2882     if (current_brk != (char*)(top) + top_size)
2883     {
2884       MALLOC_UNLOCK;
2885       return 0;     /* Apparently we don't own memory; must fail */
2886     }
2887 
2888     else
2889     {
2890       new_brk = (char*)(MORECORE (-extra));
2891 
2892       if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
2893       {
2894         /* Try to figure out what we have */
2895         current_brk = (char*)(MORECORE (0));
2896         top_size = current_brk - (char*)top;
2897         if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
2898         {
2899           sbrked_mem = current_brk - sbrk_base;
2900           set_head(top, top_size | PREV_INUSE);
2901         }
2902         check_chunk(top);
2903 	MALLOC_UNLOCK;
2904         return 0;
2905       }
2906 
2907       else
2908       {
2909         /* Success. Adjust top accordingly. */
2910         set_head(top, (top_size - extra) | PREV_INUSE);
2911         sbrked_mem -= extra;
2912         check_chunk(top);
2913 	MALLOC_UNLOCK;
2914         return 1;
2915       }
2916     }
2917   }
2918 }
2919 
2920 #endif /* DEFINE_FREE */
2921 
2922 #ifdef DEFINE_MALLOC_USABLE_SIZE
2923 
2924 /*
2925   malloc_usable_size:
2926 
2927     This routine tells you how many bytes you can actually use in an
2928     allocated chunk, which may be more than you requested (although
2929     often not). You can use this many bytes without worrying about
2930     overwriting other allocated objects. Not a particularly great
2931     programming practice, but still sometimes useful.
2932 
2933 */
2934 
malloc_usable_size(void * mem)2935 size_t malloc_usable_size(void* mem)
2936 {
2937   mchunkptr p;
2938   if (mem == 0)
2939     return 0;
2940   else
2941   {
2942     p = mem2chunk(mem);
2943     if(!chunk_is_mmapped(p))
2944     {
2945       if (!inuse(p)) return 0;
2946 #if DEBUG
2947       MALLOC_LOCK;
2948       check_inuse_chunk(p);
2949       MALLOC_UNLOCK;
2950 #endif
2951       return chunksize(p) - SIZE_SZ;
2952     }
2953     return chunksize(p) - 2*SIZE_SZ;
2954   }
2955 }
2956 
2957 #endif /* DEFINE_MALLOC_USABLE_SIZE */
2958 
2959 
2960 extern void malloc_update_mallinfo(void);
2961 
2962 #ifdef DEFINE_MALLINFO
2963 
2964 /* Utility to update current_mallinfo for malloc_stats and mallinfo() */
2965 
malloc_update_mallinfo(void)2966 STATIC void malloc_update_mallinfo(void)
2967 {
2968   int i;
2969   mbinptr b;
2970   mchunkptr p;
2971 #if DEBUG
2972   mchunkptr q;
2973 #endif
2974 
2975   INTERNAL_SIZE_T avail = chunksize(top);
2976   int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
2977 
2978   for (i = 1; i < NAV; ++i)
2979   {
2980     b = bin_at(i);
2981     for (p = last(b); p != b; p = p->bk)
2982     {
2983 #if DEBUG
2984       check_free_chunk(p);
2985       for (q = next_chunk(p);
2986            q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
2987            q = next_chunk(q))
2988         check_inuse_chunk(q);
2989 #endif
2990       avail += chunksize(p);
2991       navail++;
2992     }
2993   }
2994 
2995   current_mallinfo.ordblks = navail;
2996   current_mallinfo.uordblks = sbrked_mem - avail;
2997   current_mallinfo.fordblks = avail;
2998 #if HAVE_MMAP
2999   current_mallinfo.hblks = n_mmaps;
3000   current_mallinfo.hblkhd = mmapped_mem;
3001 #endif
3002   current_mallinfo.keepcost = chunksize(top);
3003 
3004 }
3005 
3006 #endif /* ! DEFINE_MALLINFO */
3007 
3008 
3009 #ifdef DEFINE_MALLOC_STATS
3010 
3011 /*
3012 
3013   malloc_stats:
3014 
3015     Prints on stderr the amount of space obtain from the system (both
3016     via sbrk and mmap), the maximum amount (which may be more than
3017     current if malloc_trim and/or munmap got called), the maximum
3018     number of simultaneous mmap regions used, and the current number
3019     of bytes allocated via malloc (or realloc, etc) but not yet
3020     freed. (Note that this is the number of bytes allocated, not the
3021     number requested. It will be larger than the number requested
3022     because of alignment and bookkeeping overhead.)
3023 
3024 */
3025 
malloc_stats(void)3026 void malloc_stats(void)
3027 {
3028   unsigned long local_max_total_mem;
3029   int local_sbrked_mem;
3030   struct mallinfo local_mallinfo;
3031 #if HAVE_MMAP
3032   unsigned long local_mmapped_mem, local_max_n_mmaps;
3033 #endif
3034   FILE *fp;
3035 
3036   MALLOC_LOCK;
3037   malloc_update_mallinfo();
3038   local_max_total_mem = max_total_mem;
3039   local_sbrked_mem = sbrked_mem;
3040   local_mallinfo = current_mallinfo;
3041 #if HAVE_MMAP
3042   local_mmapped_mem = mmapped_mem;
3043   local_max_n_mmaps = max_n_mmaps;
3044 #endif
3045   MALLOC_UNLOCK;
3046 
3047   fp = stderr;
3048 
3049   fprintf(fp, "max system bytes = %10u\n",
3050 	  (unsigned int)(local_max_total_mem));
3051 #if HAVE_MMAP
3052   fprintf(fp, "system bytes     = %10u\n",
3053 	  (unsigned int)(local_sbrked_mem + local_mmapped_mem));
3054   fprintf(fp, "in use bytes     = %10u\n",
3055 	  (unsigned int)(local_mallinfo.uordblks + local_mmapped_mem));
3056 #else
3057   fprintf(fp, "system bytes     = %10u\n",
3058 	  (unsigned int)local_sbrked_mem);
3059   fprintf(fp, "in use bytes     = %10u\n",
3060 	  (unsigned int)local_mallinfo.uordblks);
3061 #endif
3062 #if HAVE_MMAP
3063   fprintf(fp, "max mmap regions = %10u\n",
3064 	  (unsigned int)local_max_n_mmaps);
3065 #endif
3066 }
3067 
3068 #endif /* DEFINE_MALLOC_STATS */
3069 
3070 #ifdef DEFINE_MALLINFO
3071 
3072 /*
3073   mallinfo returns a copy of updated current mallinfo.
3074 */
3075 
mALLINFo(void)3076 struct mallinfo mALLINFo(void)
3077 {
3078   struct mallinfo ret;
3079 
3080   MALLOC_LOCK;
3081   malloc_update_mallinfo();
3082   ret = current_mallinfo;
3083   MALLOC_UNLOCK;
3084   return ret;
3085 }
3086 
3087 #endif /* DEFINE_MALLINFO */
3088 
3089 #ifdef DEFINE_MALLOPT
3090 
3091 /*
3092   mallopt:
3093 
3094     mallopt is the general SVID/XPG interface to tunable parameters.
3095     The format is to provide a (parameter-number, parameter-value) pair.
3096     mallopt then sets the corresponding parameter to the argument
3097     value if it can (i.e., so long as the value is meaningful),
3098     and returns 1 if successful else 0.
3099 
3100     See descriptions of tunable parameters above.
3101 
3102 */
3103 
mALLOPt(int param_number,int value)3104 int mALLOPt(int param_number, int value)
3105 {
3106   MALLOC_LOCK;
3107   switch(param_number)
3108   {
3109     case M_TRIM_THRESHOLD:
3110       trim_threshold = value; MALLOC_UNLOCK; return 1;
3111     case M_TOP_PAD:
3112       top_pad = value; MALLOC_UNLOCK; return 1;
3113     case M_MMAP_THRESHOLD:
3114 #if HAVE_MMAP
3115       mmap_threshold = value;
3116 #endif
3117       MALLOC_UNLOCK;
3118       return 1;
3119     case M_MMAP_MAX:
3120 #if HAVE_MMAP
3121       n_mmaps_max = value; MALLOC_UNLOCK; return 1;
3122 #else
3123       MALLOC_UNLOCK; return value == 0;
3124 #endif
3125 
3126     default:
3127       MALLOC_UNLOCK;
3128       return 0;
3129   }
3130 }
3131 
3132 #endif /* DEFINE_MALLOPT */
3133 
3134 #ifdef DEFINE_POSIX_MEMALIGN
pOSIx_mEMALIGn(void ** memptr,size_t align,size_t size)3135 int pOSIx_mEMALIGn(void **memptr, size_t align, size_t size)
3136 {
3137     /* Return EINVAL if align isn't power of 2 or not a multiple of a pointer size */
3138     if ((align & (align-1)) != 0 || align % sizeof(void *) != 0 || align == 0)
3139         return EINVAL;
3140 
3141     void *mem = mEMALIGn(align, size);
3142 
3143     if (!mem)
3144         return ENOMEM;
3145 
3146     *memptr = mem;
3147     return 0;
3148 }
3149 #endif
3150 
3151 /*
3152 
3153 History:
3154 
3155     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
3156       * Fixed ordering problem with boundary-stamping
3157 
3158     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
3159       * Added pvalloc, as recommended by H.J. Liu
3160       * Added 64bit pointer support mainly from Wolfram Gloger
3161       * Added anonymously donated WIN32 sbrk emulation
3162       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
3163       * malloc_extend_top: fix mask error that caused wastage after
3164         foreign sbrks
3165       * Add linux mremap support code from HJ Liu
3166 
3167     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
3168       * Integrated most documentation with the code.
3169       * Add support for mmap, with help from
3170         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3171       * Use last_remainder in more cases.
3172       * Pack bins using idea from  colin@nyx10.cs.du.edu
3173       * Use ordered bins instead of best-fit threshhold
3174       * Eliminate block-local decls to simplify tracing and debugging.
3175       * Support another case of realloc via move into top
3176       * Fix error occuring when initial sbrk_base not word-aligned.
3177       * Rely on page size for units instead of SBRK_UNIT to
3178         avoid surprises about sbrk alignment conventions.
3179       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
3180         (raymond@es.ele.tue.nl) for the suggestion.
3181       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
3182       * More precautions for cases where other routines call sbrk,
3183         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3184       * Added macros etc., allowing use in linux libc from
3185         H.J. Lu (hjl@gnu.ai.mit.edu)
3186       * Inverted this history list
3187 
3188     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
3189       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
3190       * Removed all preallocation code since under current scheme
3191         the work required to undo bad preallocations exceeds
3192         the work saved in good cases for most test programs.
3193       * No longer use return list or unconsolidated bins since
3194         no scheme using them consistently outperforms those that don't
3195         given above changes.
3196       * Use best fit for very large chunks to prevent some worst-cases.
3197       * Added some support for debugging
3198 
3199     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
3200       * Removed footers when chunks are in use. Thanks to
3201         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
3202 
3203     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
3204       * Added malloc_trim, with help from Wolfram Gloger
3205         (wmglo@Dent.MED.Uni-Muenchen.DE).
3206 
3207     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
3208 
3209     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
3210       * realloc: try to expand in both directions
3211       * malloc: swap order of clean-bin strategy;
3212       * realloc: only conditionally expand backwards
3213       * Try not to scavenge used bins
3214       * Use bin counts as a guide to preallocation
3215       * Occasionally bin return list chunks in first scan
3216       * Add a few optimizations from colin@nyx10.cs.du.edu
3217 
3218     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
3219       * faster bin computation & slightly different binning
3220       * merged all consolidations to one part of malloc proper
3221          (eliminating old malloc_find_space & malloc_clean_bin)
3222       * Scan 2 returns chunks (not just 1)
3223       * Propagate failure in realloc if malloc returns 0
3224       * Add stuff to allow compilation on non-ANSI compilers
3225           from kpv@research.att.com
3226 
3227     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
3228       * removed potential for odd address access in prev_chunk
3229       * removed dependency on getpagesize.h
3230       * misc cosmetics and a bit more internal documentation
3231       * anticosmetics: mangled names in macros to evade debugger strangeness
3232       * tested on sparc, hp-700, dec-mips, rs6000
3233           with gcc & native cc (hp, dec only) allowing
3234           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
3235 
3236     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
3237       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
3238          structure of old version,  but most details differ.)
3239 
3240 */
3241 #endif
3242