1 /*
2 Copyright (c) 1990 Regents of the University of California.
3 All rights reserved.
4  */
5 #ifdef MALLOC_PROVIDED
6 int _dummy_mallocr = 1;
7 #else
8 /* ---------- To make a malloc.h, start cutting here ------------ */
9 
10 /*
11   A version of malloc/free/realloc written by Doug Lea and released to the
12   public domain.  Send questions/comments/complaints/performance data
13   to dl@cs.oswego.edu
14 
15 * VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)
16 
17    Note: There may be an updated version of this malloc obtainable at
18            ftp://g.oswego.edu/pub/misc/malloc.c
19          Check before installing!
20 
21    Note: This version differs from 2.6.4 only by correcting a
22          statement ordering error that could cause failures only
23          when calls to this malloc are interposed with calls to
24          other memory allocators.
25 
26 * Why use this malloc?
27 
28   This is not the fastest, most space-conserving, most portable, or
29   most tunable malloc ever written. However it is among the fastest
30   while also being among the most space-conserving, portable and tunable.
31   Consistent balance across these factors results in a good general-purpose
32   allocator. For a high-level description, see
33      http://g.oswego.edu/dl/html/malloc.html
34 
35 * Synopsis of public routines
36 
37   (Much fuller descriptions are contained in the program documentation below.)
38 
39   malloc(size_t n);
40      Return a pointer to a newly allocated chunk of at least n bytes, or null
41      if no space is available.
42   free(Void_t* p);
43      Release the chunk of memory pointed to by p, or no effect if p is null.
44   realloc(Void_t* p, size_t n);
45      Return a pointer to a chunk of size n that contains the same data
46      as does chunk p up to the minimum of (n, p's size) bytes, or null
47      if no space is available. The returned pointer may or may not be
48      the same as p. If p is null, equivalent to malloc.  Unless the
49      #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
50      size argument of zero (re)allocates a minimum-sized chunk.
51   memalign(size_t alignment, size_t n);
52      Return a pointer to a newly allocated chunk of n bytes, aligned
53      in accord with the alignment argument, which must be a power of
54      two.
55   valloc(size_t n);
56      Equivalent to memalign(pagesize, n), where pagesize is the page
57      size of the system (or as near to this as can be figured out from
58      all the includes/defines below.)
59   pvalloc(size_t n);
60      Equivalent to valloc(minimum-page-that-holds(n)), that is,
61      round up n to nearest pagesize.
62   calloc(size_t unit, size_t quantity);
63      Returns a pointer to quantity * unit bytes, with all locations
64      set to zero.
65   cfree(Void_t* p);
66      Equivalent to free(p).
67   malloc_trim(size_t pad);
68      Release all but pad bytes of freed top-most memory back
69      to the system. Return 1 if successful, else 0.
70   malloc_usable_size(Void_t* p);
71      Report the number usable allocated bytes associated with allocated
72      chunk p. This may or may not report more bytes than were requested,
73      due to alignment and minimum size constraints.
74   malloc_stats();
75      Prints brief summary statistics on stderr.
76   mallinfo()
77      Returns (by copy) a struct containing various summary statistics.
78   mallopt(int parameter_number, int parameter_value)
79      Changes one of the tunable parameters described below. Returns
80      1 if successful in changing the parameter, else 0.
81 
82 * Vital statistics:
83 
84   Alignment:                            8-byte
85        8 byte alignment is currently hardwired into the design.  This
86        seems to suffice for all current machines and C compilers.
87 
88   Assumed pointer representation:       4 or 8 bytes
89        Code for 8-byte pointers is untested by me but has worked
90        reliably by Wolfram Gloger, who contributed most of the
91        changes supporting this.
92 
93   Assumed size_t  representation:       4 or 8 bytes
94        Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 
96   Minimum overhead per allocated chunk: 4 or 8 bytes
97        Each malloced chunk has a hidden overhead of 4 bytes holding size
98        and status information.
99 
100   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
101                           8-byte ptrs:  24/32 bytes (including, 4/8 overhead)
102 
103        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
104        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
105        needed; 4 (8) for a trailing size field
106        and 8 (16) bytes for free list pointers. Thus, the minimum
107        allocatable size is 16/24/32 bytes.
108 
109        Even a request for zero bytes (i.e., malloc(0)) returns a
110        pointer to something of the minimum allocatable size.
111 
112   Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
113                           8-byte size_t: 2^63 - 16 bytes
114 
115        It is assumed that (possibly signed) size_t bit values suffice to
116        represent chunk sizes. `Possibly signed' is due to the fact
117        that `size_t' may be defined on a system as either a signed or
118        an unsigned type. To be conservative, values that would appear
119        as negative numbers are avoided.
120        Requests for sizes with a negative sign bit will return a
121        minimum-sized chunk.
122 
123   Maximum overhead wastage per allocated chunk: normally 15 bytes
124 
125        Alignnment demands, plus the minimum allocatable size restriction
126        make the normal worst-case wastage 15 bytes (i.e., up to 15
127        more bytes will be allocated than were requested in malloc), with
128        two exceptions:
129          1. Because requests for zero bytes allocate non-zero space,
130             the worst case wastage for a request of zero bytes is 24 bytes.
131          2. For requests >= mmap_threshold that are serviced via
132             mmap(), the worst case wastage is 8 bytes plus the remainder
133             from a system page (the minimal mmap unit); typically 4096 bytes.
134 
135 * Limitations
136 
137     Here are some features that are NOT currently supported
138 
139     * No user-definable hooks for callbacks and the like.
140     * No automated mechanism for fully checking that all accesses
141       to malloced memory stay within their bounds.
142     * No support for compaction.
143 
144 * Synopsis of compile-time options:
145 
146     People have reported using previous versions of this malloc on all
147     versions of Unix, sometimes by tweaking some of the defines
148     below. It has been tested most extensively on Solaris and
149     Linux. It is also reported to work on WIN32 platforms.
150     People have also reported adapting this malloc for use in
151     stand-alone embedded systems.
152 
153     The implementation is in straight, hand-tuned ANSI C.  Among other
154     consequences, it uses a lot of macros.  Because of this, to be at
155     all usable, this code should be compiled using an optimizing compiler
156     (for example gcc -O2) that can simplify expressions and control
157     paths.
158 
159   __STD_C                  (default: derived from C compiler defines)
160      Nonzero if using ANSI-standard C compiler, a C++ compiler, or
161      a C compiler sufficiently close to ANSI to get away with it.
162   DEBUG                    (default: NOT defined)
163      Define to enable debugging. Adds fairly extensive assertion-based
164      checking to help track down memory errors, but noticeably slows down
165      execution.
166   SEPARATE_OBJECTS	   (default: NOT defined)
167      Define this to compile into separate .o files.  You must then
168      compile malloc.c several times, defining a DEFINE_* macro each
169      time.  The list of DEFINE_* macros appears below.
170   MALLOC_LOCK		   (default: NOT defined)
171   MALLOC_UNLOCK		   (default: NOT defined)
172      Define these to C expressions which are run to lock and unlock
173      the malloc data structures.  Calls may be nested; that is,
174      MALLOC_LOCK may be called more than once before the corresponding
175      MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
176      that it already holds.
177   MALLOC_ALIGNMENT          (default: NOT defined)
178      Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
179      which is the normal default.
180   REALLOC_ZERO_BYTES_FREES (default: NOT defined)
181      Define this if you think that realloc(p, 0) should be equivalent
182      to free(p). Otherwise, since malloc returns a unique pointer for
183      malloc(0), so does realloc(p, 0).
184   HAVE_MEMCPY               (default: defined)
185      Define if you are not otherwise using ANSI STD C, but still
186      have memcpy and memset in your C library and want to use them.
187      Otherwise, simple internal versions are supplied.
188   USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
189      Define as 1 if you want the C library versions of memset and
190      memcpy called in realloc and calloc (otherwise macro versions are used).
191      At least on some platforms, the simple macro versions usually
192      outperform libc versions.
193   HAVE_MMAP                 (default: defined as 1)
194      Define to non-zero to optionally make malloc() use mmap() to
195      allocate very large blocks.
196   HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
197      Define to non-zero to optionally make realloc() use mremap() to
198      reallocate very large blocks.
199   malloc_getpagesize        (default: derived from system #includes)
200      Either a constant or routine call returning the system page size.
201   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
202      Optionally define if you are on a system with a /usr/include/malloc.h
203      that declares struct mallinfo. It is not at all necessary to
204      define this even if you do, but will ensure consistency.
205   INTERNAL_SIZE_T           (default: size_t)
206      Define to a 32-bit type (probably `unsigned int') if you are on a
207      64-bit machine, yet do not want or need to allow malloc requests of
208      greater than 2^31 to be handled. This saves space, especially for
209      very small chunks.
210   INTERNAL_LINUX_C_LIB      (default: NOT defined)
211      Defined only when compiled as part of Linux libc.
212      Also note that there is some odd internal name-mangling via defines
213      (for example, internally, `malloc' is named `mALLOc') needed
214      when compiling in this case. These look funny but don't otherwise
215      affect anything.
216   _LIBC	                    (default: NOT defined)
217      Defined only when compiled as part of the Cygnus newlib
218      distribution.
219   WIN32                     (default: undefined)
220      Define this on MS win (95, nt) platforms to compile in sbrk emulation.
221   LACKS_UNISTD_H            (default: undefined)
222      Define this if your system does not have a <unistd.h>.
223   MORECORE                  (default: sbrk)
224      The name of the routine to call to obtain more memory from the system.
225   MORECORE_FAILURE          (default: -1)
226      The value returned upon failure of MORECORE.
227   MORECORE_CLEARS           (default 1)
228      True (1) if the routine mapped to MORECORE zeroes out memory (which
229      holds for sbrk).
230   DEFAULT_TRIM_THRESHOLD
231   DEFAULT_TOP_PAD
232   DEFAULT_MMAP_THRESHOLD
233   DEFAULT_MMAP_MAX
234      Default values of tunable parameters (described in detail below)
235      controlling interaction with host system routines (sbrk, mmap, etc).
236      These values may also be changed dynamically via mallopt(). The
237      preset defaults are those that give best performance for typical
238      programs/systems.
239 
240 
241 */
242 
243 
244 
245 
246 /* Preliminaries */
247 
248 #define _DEFAULT_SOURCE
249 #ifndef __STD_C
250 #ifdef __STDC__
251 #define __STD_C     1
252 #else
253 #if __cplusplus
254 #define __STD_C     1
255 #else
256 #define __STD_C     0
257 #endif /*__cplusplus*/
258 #endif /*__STDC__*/
259 #endif /*__STD_C*/
260 
261 #ifndef Void_t
262 #if __STD_C
263 #define Void_t      void
264 #else
265 #define Void_t      char
266 #endif
267 #endif /*Void_t*/
268 
269 #if __STD_C
270 #include <stddef.h>   /* for size_t */
271 #else
272 #include <sys/types.h>
273 #endif
274 
275 #ifdef __cplusplus
276 extern "C" {
277 #endif
278 
279 #include <stdio.h>    /* needed for malloc_stats */
280 #include <limits.h>   /* needed for overflow checks */
281 #include <errno.h>    /* needed to set errno to ENOMEM */
282 
283 #ifdef WIN32
284 #define WIN32_LEAN_AND_MEAN
285 #include <windows.h>
286 #endif
287 
288 /*
289   Compile-time options
290 */
291 
292 
293 /*
294 
295   Special defines for Cygnus newlib distribution.
296 
297  */
298 
299 #ifdef _LIBC
300 
301 #include <sys/config.h>
302 #include <sys/lock.h>
303 
304 #define POINTER_UINT unsigned _POINTER_INT
305 #define SEPARATE_OBJECTS
306 #define HAVE_MMAP 0
307 #define MORECORE(size) sbrk((size))
308 #define MORECORE_CLEARS 0
309 #define MALLOC_LOCK __LIBC_LOCK()
310 #define MALLOC_UNLOCK __LIBC_UNLOCK()
311 
312 #ifdef __CYGWIN__
313 # undef _WIN32
314 # undef WIN32
315 #endif
316 
317 #ifndef _WIN32
318 #ifdef SMALL_MEMORY
319 #define malloc_getpagesize (128)
320 #else
321 #define malloc_getpagesize (4096)
322 #endif
323 #endif
324 
325 #if __STD_C
326 extern void __malloc_lock(void);
327 extern void __malloc_unlock(void);
328 #else
329 extern void __malloc_lock();
330 extern void __malloc_unlock();
331 #endif
332 
333 #else /* ! _LIBC */
334 
335 #define POINTER_UINT unsigned long
336 
337 #endif /* ! _LIBC */
338 
339 /*
340     Debugging:
341 
342     Because freed chunks may be overwritten with link fields, this
343     malloc will often die when freed memory is overwritten by user
344     programs.  This can be very effective (albeit in an annoying way)
345     in helping track down dangling pointers.
346 
347     If you compile with -DDEBUG, a number of assertion checks are
348     enabled that will catch more memory errors. You probably won't be
349     able to make much sense of the actual assertion errors, but they
350     should help you locate incorrectly overwritten memory.  The
351     checking is fairly extensive, and will slow down execution
352     noticeably. Calling malloc_stats or mallinfo with DEBUG set will
353     attempt to check every non-mmapped allocated and free chunk in the
354     course of computing the summmaries. (By nature, mmapped regions
355     cannot be checked very much automatically.)
356 
357     Setting DEBUG may also be helpful if you are trying to modify
358     this code. The assertions in the check routines spell out in more
359     detail the assumptions and invariants underlying the algorithms.
360 
361 */
362 
363 #if DEBUG
364 #include <assert.h>
365 #else
366 #define assert(x) ((void)0)
367 #endif
368 
369 
370 /*
371   SEPARATE_OBJECTS should be defined if you want each function to go
372   into a separate .o file.  You must then compile malloc.c once per
373   function, defining the appropriate DEFINE_ macro.  See below for the
374   list of macros.
375  */
376 
377 #ifndef SEPARATE_OBJECTS
378 #define DEFINE_MALLOC
379 #define DEFINE_FREE
380 #define DEFINE_REALLOC
381 #define DEFINE_CALLOC
382 #define DEFINE_CFREE
383 #define DEFINE_MEMALIGN
384 #define DEFINE_VALLOC
385 #define DEFINE_PVALLOC
386 #define DEFINE_MALLINFO
387 #define DEFINE_MALLOC_STATS
388 #define DEFINE_MALLOC_USABLE_SIZE
389 #define DEFINE_MALLOPT
390 
391 #define STATIC static
392 #else
393 #define STATIC
394 #endif
395 
396 /*
397   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
398   of chunk sizes. On a 64-bit machine, you can reduce malloc
399   overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
400   at the expense of not being able to handle requests greater than
401   2^31. This limitation is hardly ever a concern; you are encouraged
402   to set this. However, the default version is the same as size_t.
403   Since the implementation relies on __builtin_mul_overflow, defining
404   a custom INTERNAL_SIZE_T on machines/compilers without
405   __builtin_mul_overflow is not permitted.
406 */
407 
408 #ifndef INTERNAL_SIZE_T
409 #define INTERNAL_SIZE_T size_t
410 #elif !defined(_HAVE_BUILTIN_MUL_OVERFLOW)
411 #error Compiler does not support __builtin_mul_overflow, hence INTERNAL_SIZE_T cannot be set
412 #endif
413 
414 /*
415   Following is needed on implementations whereby long > size_t.
416   The problem is caused because the code performs subtractions of
417   size_t values and stores the result in long values.  In the case
418   where long > size_t and the first value is actually less than
419   the second value, the resultant value is positive.  For example,
420   (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
421   which is 2*31 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
422   fact that assignment from unsigned to signed won't sign extend.
423 */
424 
425 #define long_sub_size_t(x, y)				\
426   (sizeof (long) > sizeof (INTERNAL_SIZE_T) && x < y	\
427    ? -(long) (y - x)					\
428    : (long) (x - y))
429 
430 /*
431   REALLOC_ZERO_BYTES_FREES should be set if a call to
432   realloc with zero bytes should be the same as a call to free.
433   Some people think it should. Otherwise, since this malloc
434   returns a unique pointer for malloc(0), so does realloc(p, 0).
435 */
436 
437 
438 /*   #define REALLOC_ZERO_BYTES_FREES */
439 
440 
441 /*
442   WIN32 causes an emulation of sbrk to be compiled in
443   mmap-based options are not currently supported in WIN32.
444 */
445 
446 /* #define WIN32 */
447 #ifdef WIN32
448 #define MORECORE wsbrk
449 #define HAVE_MMAP 0
450 #endif
451 
452 
453 /*
454   HAVE_MEMCPY should be defined if you are not otherwise using
455   ANSI STD C, but still have memcpy and memset in your C library
456   and want to use them in calloc and realloc. Otherwise simple
457   macro versions are defined here.
458 
459   USE_MEMCPY should be defined as 1 if you actually want to
460   have memset and memcpy called. People report that the macro
461   versions are often enough faster than libc versions on many
462   systems that it is better to use them.
463 
464 */
465 
466 #define HAVE_MEMCPY
467 
468 /* Although the original macro is called USE_MEMCPY, newlib actually
469    uses memmove to handle cases whereby a platform's memcpy implementation
470    copies backwards and thus destructive overlap may occur in realloc
471    whereby we are reclaiming free memory prior to the old allocation.  */
472 #ifndef USE_MEMCPY
473 #ifdef HAVE_MEMCPY
474 #define USE_MEMCPY 1
475 #else
476 #define USE_MEMCPY 0
477 #endif
478 #endif
479 
480 #if (__STD_C || defined(HAVE_MEMCPY))
481 
482 #if __STD_C
483 void* memset(void*, int, size_t);
484 void* memcpy(void*, const void*, size_t);
485 void* memmove(void*, const void*, size_t);
486 #else
487 Void_t* memset();
488 Void_t* memcpy();
489 Void_t* memmove();
490 #endif
491 #endif
492 
493 #if USE_MEMCPY
494 
495 /* The following macros are only invoked with (2n+1)-multiples of
496    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
497    for fast inline execution when n is small. */
498 
499 #define MALLOC_ZERO(charp, nbytes)                                            \
500 do {                                                                          \
501   INTERNAL_SIZE_T mzsz = (nbytes);                                            \
502   if(mzsz <= 9*sizeof(mzsz)) {                                                \
503     INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
504     if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
505                                      *mz++ = 0;                               \
506       if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
507                                      *mz++ = 0;                               \
508         if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
509                                      *mz++ = 0; }}}                           \
510                                      *mz++ = 0;                               \
511                                      *mz++ = 0;                               \
512                                      *mz   = 0;                               \
513   } else memset((charp), 0, mzsz);                                            \
514 } while(0)
515 
516 #define MALLOC_COPY(dest,src,nbytes)                                          \
517 do {                                                                          \
518   INTERNAL_SIZE_T mcsz = (nbytes);                                            \
519   if(mcsz <= 9*sizeof(mcsz)) {                                                \
520     INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
521     INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
522     if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
523                                      *mcdst++ = *mcsrc++;                     \
524       if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
525                                      *mcdst++ = *mcsrc++;                     \
526         if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
527                                      *mcdst++ = *mcsrc++; }}}                 \
528                                      *mcdst++ = *mcsrc++;                     \
529                                      *mcdst++ = *mcsrc++;                     \
530                                      *mcdst   = *mcsrc  ;                     \
531   } else memmove(dest, src, mcsz);                                             \
532 } while(0)
533 
534 #else /* !USE_MEMCPY */
535 
536 /* Use Duff's device for good zeroing/copying performance. */
537 
538 #define MALLOC_ZERO(charp, nbytes)                                            \
539 do {                                                                          \
540   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
541   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
542   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
543   switch (mctmp) {                                                            \
544     case 0: for(;;) { *mzp++ = 0;                                             \
545     case 7:           *mzp++ = 0;                                             \
546     case 6:           *mzp++ = 0;                                             \
547     case 5:           *mzp++ = 0;                                             \
548     case 4:           *mzp++ = 0;                                             \
549     case 3:           *mzp++ = 0;                                             \
550     case 2:           *mzp++ = 0;                                             \
551     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
552   }                                                                           \
553 } while(0)
554 
555 #define MALLOC_COPY(dest,src,nbytes)                                          \
556 do {                                                                          \
557   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
558   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
559   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
560   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
561   switch (mctmp) {                                                            \
562     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
563     case 7:           *mcdst++ = *mcsrc++;                                    \
564     case 6:           *mcdst++ = *mcsrc++;                                    \
565     case 5:           *mcdst++ = *mcsrc++;                                    \
566     case 4:           *mcdst++ = *mcsrc++;                                    \
567     case 3:           *mcdst++ = *mcsrc++;                                    \
568     case 2:           *mcdst++ = *mcsrc++;                                    \
569     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
570   }                                                                           \
571 } while(0)
572 
573 #endif
574 
575 
576 /*
577   Define HAVE_MMAP to optionally make malloc() use mmap() to
578   allocate very large blocks.  These will be returned to the
579   operating system immediately after a free().
580 */
581 
582 #ifndef HAVE_MMAP
583 #define HAVE_MMAP 1
584 #endif
585 
586 /*
587   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
588   large blocks.  This is currently only possible on Linux with
589   kernel versions newer than 1.3.77.
590 */
591 
592 #ifndef HAVE_MREMAP
593 #ifdef INTERNAL_LINUX_C_LIB
594 #define HAVE_MREMAP 1
595 #else
596 #define HAVE_MREMAP 0
597 #endif
598 #endif
599 
600 #if HAVE_MMAP
601 
602 #include <unistd.h>
603 #include <fcntl.h>
604 #include <sys/mman.h>
605 
606 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
607 #define MAP_ANONYMOUS MAP_ANON
608 #endif
609 
610 #endif /* HAVE_MMAP */
611 
612 /*
613   Access to system page size. To the extent possible, this malloc
614   manages memory from the system in page-size units.
615 
616   The following mechanics for getpagesize were adapted from
617   bsd/gnu getpagesize.h
618 */
619 
620 #ifndef LACKS_UNISTD_H
621 #  include <unistd.h>
622 #endif
623 
624 #ifndef malloc_getpagesize
625 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
626 #    ifndef _SC_PAGE_SIZE
627 #      define _SC_PAGE_SIZE _SC_PAGESIZE
628 #    endif
629 #  endif
630 #  ifdef _SC_PAGE_SIZE
631 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
632 #  else
633 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
634        extern size_t getpagesize();
635 #      define malloc_getpagesize getpagesize()
636 #    else
637 #      include <sys/param.h>
638 #      ifdef EXEC_PAGESIZE
639 #        define malloc_getpagesize EXEC_PAGESIZE
640 #      else
641 #        ifdef NBPG
642 #          ifndef CLSIZE
643 #            define malloc_getpagesize NBPG
644 #          else
645 #            define malloc_getpagesize (NBPG * CLSIZE)
646 #          endif
647 #        else
648 #          ifdef NBPC
649 #            define malloc_getpagesize NBPC
650 #          else
651 #            ifdef PAGESIZE
652 #              define malloc_getpagesize PAGESIZE
653 #            else
654 #              define malloc_getpagesize (4096) /* just guess */
655 #            endif
656 #          endif
657 #        endif
658 #      endif
659 #    endif
660 #  endif
661 #endif
662 
663 
664 
665 /*
666 
667   This version of malloc supports the standard SVID/XPG mallinfo
668   routine that returns a struct containing the same kind of
669   information you can get from malloc_stats. It should work on
670   any SVID/XPG compliant system that has a /usr/include/malloc.h
671   defining struct mallinfo. (If you'd like to install such a thing
672   yourself, cut out the preliminary declarations as described above
673   and below and save them in a malloc.h file. But there's no
674   compelling reason to bother to do this.)
675 
676   The main declaration needed is the mallinfo struct that is returned
677   (by-copy) by mallinfo().  The SVID/XPG malloinfo struct contains a
678   bunch of fields, most of which are not even meaningful in this
679   version of malloc. Some of these fields are are instead filled by
680   mallinfo() with other numbers that might possibly be of interest.
681 
682   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
683   /usr/include/malloc.h file that includes a declaration of struct
684   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
685   version is declared below.  These must be precisely the same for
686   mallinfo() to work.
687 
688 */
689 
690 /* #define HAVE_USR_INCLUDE_MALLOC_H */
691 
692 #if HAVE_USR_INCLUDE_MALLOC_H
693 #include "/usr/include/malloc.h"
694 #else
695 
696 /* SVID2/XPG mallinfo structure */
697 
698 struct mallinfo {
699   int arena;    /* total space allocated from system */
700   int ordblks;  /* number of non-inuse chunks */
701   int smblks;   /* unused -- always zero */
702   int hblks;    /* number of mmapped regions */
703   int hblkhd;   /* total space in mmapped regions */
704   int usmblks;  /* unused -- always zero */
705   int fsmblks;  /* unused -- always zero */
706   int uordblks; /* total allocated space */
707   int fordblks; /* total non-inuse space */
708   int keepcost; /* top-most, releasable (via malloc_trim) space */
709 };
710 
711 /* SVID2/XPG mallopt options */
712 
713 #define M_MXFAST  1    /* UNUSED in this malloc */
714 #define M_NLBLKS  2    /* UNUSED in this malloc */
715 #define M_GRAIN   3    /* UNUSED in this malloc */
716 #define M_KEEP    4    /* UNUSED in this malloc */
717 
718 #endif
719 
720 /* mallopt options that actually do something */
721 
722 #define M_TRIM_THRESHOLD    -1
723 #define M_TOP_PAD           -2
724 #define M_MMAP_THRESHOLD    -3
725 #define M_MMAP_MAX          -4
726 
727 
728 
729 #ifndef DEFAULT_TRIM_THRESHOLD
730 #define DEFAULT_TRIM_THRESHOLD (128L * 1024L)
731 #endif
732 
733 /*
734     M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
735       to keep before releasing via malloc_trim in free().
736 
737       Automatic trimming is mainly useful in long-lived programs.
738       Because trimming via sbrk can be slow on some systems, and can
739       sometimes be wasteful (in cases where programs immediately
740       afterward allocate more large chunks) the value should be high
741       enough so that your overall system performance would improve by
742       releasing.
743 
744       The trim threshold and the mmap control parameters (see below)
745       can be traded off with one another. Trimming and mmapping are
746       two different ways of releasing unused memory back to the
747       system. Between these two, it is often possible to keep
748       system-level demands of a long-lived program down to a bare
749       minimum. For example, in one test suite of sessions measuring
750       the XF86 X server on Linux, using a trim threshold of 128K and a
751       mmap threshold of 192K led to near-minimal long term resource
752       consumption.
753 
754       If you are using this malloc in a long-lived program, it should
755       pay to experiment with these values.  As a rough guide, you
756       might set to a value close to the average size of a process
757       (program) running on your system.  Releasing this much memory
758       would allow such a process to run in memory.  Generally, it's
759       worth it to tune for trimming rather tham memory mapping when a
760       program undergoes phases where several large chunks are
761       allocated and released in ways that can reuse each other's
762       storage, perhaps mixed with phases where there are no such
763       chunks at all.  And in well-behaved long-lived programs,
764       controlling release of large blocks via trimming versus mapping
765       is usually faster.
766 
767       However, in most programs, these parameters serve mainly as
768       protection against the system-level effects of carrying around
769       massive amounts of unneeded memory. Since frequent calls to
770       sbrk, mmap, and munmap otherwise degrade performance, the default
771       parameters are set to relatively high values that serve only as
772       safeguards.
773 
774       The default trim value is high enough to cause trimming only in
775       fairly extreme (by current memory consumption standards) cases.
776       It must be greater than page size to have any useful effect.  To
777       disable trimming completely, you can set to (unsigned long)(-1);
778 
779 
780 */
781 
782 
783 #ifndef DEFAULT_TOP_PAD
784 #define DEFAULT_TOP_PAD        (0)
785 #endif
786 
787 /*
788     M_TOP_PAD is the amount of extra `padding' space to allocate or
789       retain whenever sbrk is called. It is used in two ways internally:
790 
791       * When sbrk is called to extend the top of the arena to satisfy
792         a new malloc request, this much padding is added to the sbrk
793         request.
794 
795       * When malloc_trim is called automatically from free(),
796         it is used as the `pad' argument.
797 
798       In both cases, the actual amount of padding is rounded
799       so that the end of the arena is always a system page boundary.
800 
801       The main reason for using padding is to avoid calling sbrk so
802       often. Having even a small pad greatly reduces the likelihood
803       that nearly every malloc request during program start-up (or
804       after trimming) will invoke sbrk, which needlessly wastes
805       time.
806 
807       Automatic rounding-up to page-size units is normally sufficient
808       to avoid measurable overhead, so the default is 0.  However, in
809       systems where sbrk is relatively slow, it can pay to increase
810       this value, at the expense of carrying around more memory than
811       the program needs.
812 
813 */
814 
815 
816 #ifndef DEFAULT_MMAP_THRESHOLD
817 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
818 #endif
819 
820 /*
821 
822     M_MMAP_THRESHOLD is the request size threshold for using mmap()
823       to service a request. Requests of at least this size that cannot
824       be allocated using already-existing space will be serviced via mmap.
825       (If enough normal freed space already exists it is used instead.)
826 
827       Using mmap segregates relatively large chunks of memory so that
828       they can be individually obtained and released from the host
829       system. A request serviced through mmap is never reused by any
830       other request (at least not directly; the system may just so
831       happen to remap successive requests to the same locations).
832 
833       Segregating space in this way has the benefit that mmapped space
834       can ALWAYS be individually released back to the system, which
835       helps keep the system level memory demands of a long-lived
836       program low. Mapped memory can never become `locked' between
837       other chunks, as can happen with normally allocated chunks, which
838       menas that even trimming via malloc_trim would not release them.
839 
840       However, it has the disadvantages that:
841 
842          1. The space cannot be reclaimed, consolidated, and then
843             used to service later requests, as happens with normal chunks.
844          2. It can lead to more wastage because of mmap page alignment
845             requirements
846          3. It causes malloc performance to be more dependent on host
847             system memory management support routines which may vary in
848             implementation quality and may impose arbitrary
849             limitations. Generally, servicing a request via normal
850             malloc steps is faster than going through a system's mmap.
851 
852       All together, these considerations should lead you to use mmap
853       only for relatively large requests.
854 
855 
856 */
857 
858 
859 
860 #ifndef DEFAULT_MMAP_MAX
861 #if HAVE_MMAP
862 #define DEFAULT_MMAP_MAX       (64)
863 #else
864 #define DEFAULT_MMAP_MAX       (0)
865 #endif
866 #endif
867 
868 /*
869     M_MMAP_MAX is the maximum number of requests to simultaneously
870       service using mmap. This parameter exists because:
871 
872          1. Some systems have a limited number of internal tables for
873             use by mmap.
874          2. In most systems, overreliance on mmap can degrade overall
875             performance.
876          3. If a program allocates many large regions, it is probably
877             better off using normal sbrk-based allocation routines that
878             can reclaim and reallocate normal heap memory. Using a
879             small value allows transition into this mode after the
880             first few allocations.
881 
882       Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
883       the default value is 0, and attempts to set it to non-zero values
884       in mallopt will fail.
885 */
886 
887 
888 
889 
890 /*
891 
892   Special defines for linux libc
893 
894   Except when compiled using these special defines for Linux libc
895   using weak aliases, this malloc is NOT designed to work in
896   multithreaded applications.  No semaphores or other concurrency
897   control are provided to ensure that multiple malloc or free calls
898   don't run at the same time, which could be disasterous. A single
899   semaphore could be used across malloc, realloc, and free (which is
900   essentially the effect of the linux weak alias approach). It would
901   be hard to obtain finer granularity.
902 
903 */
904 
905 
906 #ifdef INTERNAL_LINUX_C_LIB
907 
908 #if __STD_C
909 
910 Void_t * __default_morecore_init (ptrdiff_t);
911 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;
912 
913 #else
914 
915 Void_t * __default_morecore_init ();
916 Void_t *(*__morecore)() = __default_morecore_init;
917 
918 #endif
919 
920 #define MORECORE (*__morecore)
921 #define MORECORE_FAILURE 0
922 #define MORECORE_CLEARS 1
923 
924 #else /* INTERNAL_LINUX_C_LIB */
925 
926 #ifndef _LIBC
927 #if __STD_C
928 extern Void_t*     sbrk(ptrdiff_t);
929 #else
930 extern Void_t*     sbrk();
931 #endif
932 #endif
933 
934 #ifndef MORECORE
935 #define MORECORE sbrk
936 #endif
937 
938 #ifndef MORECORE_FAILURE
939 #define MORECORE_FAILURE -1
940 #endif
941 
942 #ifndef MORECORE_CLEARS
943 #define MORECORE_CLEARS 1
944 #endif
945 
946 #endif /* INTERNAL_LINUX_C_LIB */
947 
948 #if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)
949 
950 #define cALLOc		__libc_calloc
951 #define fREe		__libc_free
952 #define mALLOc		__libc_malloc
953 #define mEMALIGn	__libc_memalign
954 #define rEALLOc		__libc_realloc
955 #define vALLOc		__libc_valloc
956 #define pvALLOc		__libc_pvalloc
957 #define mALLINFo	__libc_mallinfo
958 #define mALLOPt		__libc_mallopt
959 
960 #pragma weak calloc = __libc_calloc
961 #pragma weak free = __libc_free
962 #pragma weak cfree = __libc_free
963 #pragma weak malloc = __libc_malloc
964 #pragma weak memalign = __libc_memalign
965 #pragma weak realloc = __libc_realloc
966 #pragma weak valloc = __libc_valloc
967 #pragma weak pvalloc = __libc_pvalloc
968 #pragma weak mallinfo = __libc_mallinfo
969 #pragma weak mallopt = __libc_mallopt
970 
971 #else
972 
973 #define cALLOc		calloc
974 #define fREe		free
975 #define mALLOc		malloc
976 #define mEMALIGn	memalign
977 #define rEALLOc		realloc
978 #define vALLOc		valloc
979 #define pvALLOc		pvalloc
980 #define mALLINFo	mallinfo
981 #define mALLOPt		mallopt
982 
983 #ifdef _LIBC
984 
985 #define malloc_stats			malloc_stats
986 #define malloc_trim			malloc_trim
987 #define malloc_usable_size		malloc_usable_size
988 
989 #define malloc_update_mallinfo		__malloc_update_mallinfo
990 
991 #define malloc_av_			__malloc_av_
992 #define malloc_current_mallinfo		__malloc_current_mallinfo
993 #define malloc_max_sbrked_mem		__malloc_max_sbrked_mem
994 #define malloc_max_total_mem		__malloc_max_total_mem
995 #define malloc_sbrk_base		__malloc_sbrk_base
996 #define malloc_top_pad			__malloc_top_pad
997 #define malloc_trim_threshold		__malloc_trim_threshold
998 
999 #endif /* ! _LIBC */
1000 #endif
1001 
1002 /* Public routines */
1003 
1004 #if __STD_C
1005 
1006 Void_t* mALLOc(size_t);
1007 void    fREe(Void_t*);
1008 Void_t* rEALLOc(Void_t*, size_t);
1009 Void_t* mEMALIGn(size_t, size_t);
1010 Void_t* vALLOc(size_t);
1011 Void_t* pvALLOc(size_t);
1012 Void_t* cALLOc(size_t, size_t);
1013 void    cfree(Void_t*);
1014 int     malloc_trim(size_t);
1015 size_t  malloc_usable_size(Void_t*);
1016 void    malloc_stats(void);
1017 int     mALLOPt(int, int);
1018 struct mallinfo mALLINFo(void);
1019 #else
1020 Void_t* mALLOc();
1021 void    fREe();
1022 Void_t* rEALLOc();
1023 Void_t* mEMALIGn();
1024 Void_t* vALLOc();
1025 Void_t* pvALLOc();
1026 Void_t* cALLOc();
1027 void    cfree();
1028 int     malloc_trim();
1029 size_t  malloc_usable_size();
1030 void    malloc_stats();
1031 int     mALLOPt();
1032 struct mallinfo mALLINFo();
1033 #endif
1034 
1035 /* Work around compiler optimizing away stores to 'size' field before
1036  * call to free.
1037  */
1038 #ifdef _HAVE_ALIAS_ATTRIBUTE
1039 extern __typeof(free) __malloc_free;
1040 #else
1041 #define __malloc_free(x) fREe(x)
1042 #endif
1043 
1044 #ifdef __cplusplus
1045 };  /* end of extern "C" */
1046 #endif
1047 
1048 /* ---------- To make a malloc.h, end cutting here ------------ */
1049 
1050 
1051 /*
1052   Emulation of sbrk for WIN32
1053   All code within the ifdef WIN32 is untested by me.
1054 */
1055 
1056 
1057 #ifdef WIN32
1058 
1059 #define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
1060 ~(malloc_getpagesize-1))
1061 
1062 /* resrve 64MB to insure large contiguous space */
1063 #define RESERVED_SIZE (1024*1024*64)
1064 #define NEXT_SIZE (2048*1024)
1065 #define TOP_MEMORY ((unsigned long)2*1024*1024*1024)
1066 
1067 struct GmListElement;
1068 typedef struct GmListElement GmListElement;
1069 
1070 struct GmListElement
1071 {
1072 	GmListElement* next;
1073 	void* base;
1074 };
1075 
1076 static GmListElement* head = 0;
1077 static unsigned int gNextAddress = 0;
1078 static unsigned int gAddressBase = 0;
1079 static unsigned int gAllocatedSize = 0;
1080 
1081 static
makeGmListElement(void * bas)1082 GmListElement* makeGmListElement (void* bas)
1083 {
1084 	GmListElement* this;
1085 	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
1086 	ASSERT (this);
1087 	if (this)
1088 	{
1089 		this->base = bas;
1090 		this->next = head;
1091 		head = this;
1092 	}
1093 	return this;
1094 }
1095 
gcleanup(void)1096 void gcleanup (void)
1097 {
1098 	BOOL rval;
1099 	ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase));
1100 	if (gAddressBase && (gNextAddress - gAddressBase))
1101 	{
1102 		rval = VirtualFree ((void*)gAddressBase,
1103 							gNextAddress - gAddressBase,
1104 							MEM_DECOMMIT);
1105         ASSERT (rval);
1106 	}
1107 	while (head)
1108 	{
1109 		GmListElement* next = head->next;
1110 		rval = VirtualFree (head->base, 0, MEM_RELEASE);
1111 		ASSERT (rval);
1112 		LocalFree (head);
1113 		head = next;
1114 	}
1115 }
1116 
1117 static
findRegion(void * start_address,unsigned long size)1118 void* findRegion (void* start_address, unsigned long size)
1119 {
1120 	MEMORY_BASIC_INFORMATION info;
1121 	while ((unsigned long)start_address < TOP_MEMORY)
1122 	{
1123 		VirtualQuery (start_address, &info, sizeof (info));
1124 		if (info.State != MEM_FREE)
1125 			start_address = (char*)info.BaseAddress + info.RegionSize;
1126 		else if (info.RegionSize >= size)
1127 			return start_address;
1128 		else
1129 			start_address = (char*)info.BaseAddress + info.RegionSize;
1130 	}
1131 	return NULL;
1132 
1133 }
1134 
1135 
wsbrk(long size)1136 void* wsbrk (long size)
1137 {
1138 	void* tmp;
1139 	if (size > 0)
1140 	{
1141 		if (gAddressBase == 0)
1142 		{
1143 			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
1144 			gNextAddress = gAddressBase =
1145 				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
1146 											MEM_RESERVE, PAGE_NOACCESS);
1147 		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
1148 gAllocatedSize))
1149 		{
1150 			long new_size = max (NEXT_SIZE, AlignPage (size));
1151 			void* new_address = (void*)(gAddressBase+gAllocatedSize);
1152 			do
1153 			{
1154 				new_address = findRegion (new_address, new_size);
1155 
1156 				if (new_address == 0)
1157 					return (void*)-1;
1158 
1159 				gAddressBase = gNextAddress =
1160 					(unsigned int)VirtualAlloc (new_address, new_size,
1161 												MEM_RESERVE, PAGE_NOACCESS);
1162 				// repeat in case of race condition
1163 				// The region that we found has been snagged
1164 				// by another thread
1165 			}
1166 			while (gAddressBase == 0);
1167 
1168 			ASSERT (new_address == (void*)gAddressBase);
1169 
1170 			gAllocatedSize = new_size;
1171 
1172 			if (!makeGmListElement ((void*)gAddressBase))
1173 				return (void*)-1;
1174 		}
1175 		if ((size + gNextAddress) > AlignPage (gNextAddress))
1176 		{
1177 			void* res;
1178 			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
1179 								(size + gNextAddress -
1180 								 AlignPage (gNextAddress)),
1181 								MEM_COMMIT, PAGE_READWRITE);
1182 			if (res == 0)
1183 				return (void*)-1;
1184 		}
1185 		tmp = (void*)gNextAddress;
1186 		gNextAddress = (unsigned int)tmp + size;
1187 		return tmp;
1188 	}
1189 	else if (size < 0)
1190 	{
1191 		unsigned int alignedGoal = AlignPage (gNextAddress + size);
1192 		/* Trim by releasing the virtual memory */
1193 		if (alignedGoal >= gAddressBase)
1194 		{
1195 			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
1196 						 MEM_DECOMMIT);
1197 			gNextAddress = gNextAddress + size;
1198 			return (void*)gNextAddress;
1199 		}
1200 		else
1201 		{
1202 			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
1203 						 MEM_DECOMMIT);
1204 			gNextAddress = gAddressBase;
1205 			return (void*)-1;
1206 		}
1207 	}
1208 	else
1209 	{
1210 		return (void*)gNextAddress;
1211 	}
1212 }
1213 
1214 #endif
1215 
1216 
1217 
1218 /*
1219   Type declarations
1220 */
1221 
1222 
1223 struct malloc_chunk
1224 {
1225   INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1226   INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
1227   struct malloc_chunk* fd;   /* double links -- used only if free. */
1228   struct malloc_chunk* bk;
1229 };
1230 
1231 typedef struct malloc_chunk* mchunkptr;
1232 
1233 /*
1234 
1235    malloc_chunk details:
1236 
1237     (The following includes lightly edited explanations by Colin Plumb.)
1238 
1239     Chunks of memory are maintained using a `boundary tag' method as
1240     described in e.g., Knuth or Standish.  (See the paper by Paul
1241     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1242     survey of such techniques.)  Sizes of free chunks are stored both
1243     in the front of each chunk and at the end.  This makes
1244     consolidating fragmented chunks into bigger chunks very fast.  The
1245     size fields also hold bits representing whether chunks are free or
1246     in use.
1247 
1248     An allocated chunk looks like this:
1249 
1250 
1251     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1252             |             Size of previous chunk, if allocated            | |
1253             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1254             |             Size of chunk, in bytes                         |P|
1255       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1256             |             User data starts here...                          .
1257             .                                                               .
1258             .             (malloc_usable_space() bytes)                     .
1259             .                                                               |
1260 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1261             |             Size of chunk                                     |
1262             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1263 
1264 
1265     Where "chunk" is the front of the chunk for the purpose of most of
1266     the malloc code, but "mem" is the pointer that is returned to the
1267     user.  "Nextchunk" is the beginning of the next contiguous chunk.
1268 
1269     Chunks always begin on even word boundries, so the mem portion
1270     (which is returned to the user) is also on an even word boundary, and
1271     thus double-word aligned.
1272 
1273     Free chunks are stored in circular doubly-linked lists, and look like this:
1274 
1275     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1276             |             Size of previous chunk                            |
1277             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1278     `head:' |             Size of chunk, in bytes                         |P|
1279       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1280             |             Forward pointer to next chunk in list             |
1281             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1282             |             Back pointer to previous chunk in list            |
1283             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1284             |             Unused space (may be 0 bytes long)                .
1285             .                                                               .
1286             .                                                               |
1287 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1288     `foot:' |             Size of chunk, in bytes                           |
1289             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1290 
1291     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1292     chunk size (which is always a multiple of two words), is an in-use
1293     bit for the *previous* chunk.  If that bit is *clear*, then the
1294     word before the current chunk size contains the previous chunk
1295     size, and can be used to find the front of the previous chunk.
1296     (The very first chunk allocated always has this bit set,
1297     preventing access to non-existent (or non-owned) memory.)
1298 
1299     Note that the `foot' of the current chunk is actually represented
1300     as the prev_size of the NEXT chunk. (This makes it easier to
1301     deal with alignments etc).
1302 
1303     The two exceptions to all this are
1304 
1305      1. The special chunk `top', which doesn't bother using the
1306         trailing size field since there is no
1307         next contiguous chunk that would have to index off it. (After
1308         initialization, `top' is forced to always exist.  If it would
1309         become less than MINSIZE bytes long, it is replenished via
1310         malloc_extend_top.)
1311 
1312      2. Chunks allocated via mmap, which have the second-lowest-order
1313         bit (IS_MMAPPED) set in their size fields.  Because they are
1314         never merged or traversed from any other chunk, they have no
1315         foot size or inuse information.
1316 
1317     Available chunks are kept in any of several places (all declared below):
1318 
1319     * `av': An array of chunks serving as bin headers for consolidated
1320        chunks. Each bin is doubly linked.  The bins are approximately
1321        proportionally (log) spaced.  There are a lot of these bins
1322        (128). This may look excessive, but works very well in
1323        practice.  All procedures maintain the invariant that no
1324        consolidated chunk physically borders another one. Chunks in
1325        bins are kept in size order, with ties going to the
1326        approximately least recently used chunk.
1327 
1328        The chunks in each bin are maintained in decreasing sorted order by
1329        size.  This is irrelevant for the small bins, which all contain
1330        the same-sized chunks, but facilitates best-fit allocation for
1331        larger chunks. (These lists are just sequential. Keeping them in
1332        order almost never requires enough traversal to warrant using
1333        fancier ordered data structures.)  Chunks of the same size are
1334        linked with the most recently freed at the front, and allocations
1335        are taken from the back.  This results in LRU or FIFO allocation
1336        order, which tends to give each chunk an equal opportunity to be
1337        consolidated with adjacent freed chunks, resulting in larger free
1338        chunks and less fragmentation.
1339 
1340     * `top': The top-most available chunk (i.e., the one bordering the
1341        end of available memory) is treated specially. It is never
1342        included in any bin, is used only if no other chunk is
1343        available, and is released back to the system if it is very
1344        large (see M_TRIM_THRESHOLD).
1345 
1346     * `last_remainder': A bin holding only the remainder of the
1347        most recently split (non-top) chunk. This bin is checked
1348        before other non-fitting chunks, so as to provide better
1349        locality for runs of sequentially allocated chunks.
1350 
1351     *  Implicitly, through the host system's memory mapping tables.
1352        If supported, requests greater than a threshold are usually
1353        serviced via calls to mmap, and then later released via munmap.
1354 
1355 */
1356 
1357 
1358 
1359 
1360 
1361 
1362 /*  sizes, alignments */
1363 
1364 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
1365 #ifndef MALLOC_ALIGNMENT
1366 #define MALLOC_ALIGN           8
1367 #define MALLOC_ALIGNMENT       (SIZE_SZ < 4 ? 8 : (SIZE_SZ + SIZE_SZ))
1368 #else
1369 #define MALLOC_ALIGN           MALLOC_ALIGNMENT
1370 #endif
1371 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
1372 #define MINSIZE                (sizeof(struct malloc_chunk))
1373 
1374 /* conversion from malloc headers to user pointers, and back */
1375 
1376 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1377 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1378 
1379 /* pad request bytes into a usable size */
1380 
1381 #define request2size(req) \
1382  (((unsigned long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
1383   (unsigned long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
1384    (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
1385 
1386 /* Check if m has acceptable alignment */
1387 
1388 #define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1389 
1390 
1391 
1392 
1393 /*
1394   Physical chunk operations
1395 */
1396 
1397 
1398 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1399 
1400 #define PREV_INUSE 0x1
1401 
1402 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1403 
1404 #define IS_MMAPPED 0x2
1405 
1406 /* Bits to mask off when extracting size */
1407 
1408 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1409 
1410 
1411 /* Ptr to next physical malloc_chunk. */
1412 
1413 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1414 
1415 /* Ptr to previous physical malloc_chunk */
1416 
1417 #define prev_chunk(p)\
1418    ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1419 
1420 
1421 /* Treat space at ptr + offset as a chunk */
1422 
1423 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
1424 
1425 
1426 
1427 
1428 /*
1429   Dealing with use bits
1430 */
1431 
1432 /* extract p's inuse bit */
1433 
1434 #define inuse(p)\
1435 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1436 
1437 /* extract inuse bit of previous chunk */
1438 
1439 #define prev_inuse(p)  ((p)->size & PREV_INUSE)
1440 
1441 /* check for mmap()'ed chunk */
1442 
1443 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1444 
1445 /* set/clear chunk as in use without otherwise disturbing */
1446 
1447 #define set_inuse(p)\
1448 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1449 
1450 #define clear_inuse(p)\
1451 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1452 
1453 /* check/set/clear inuse bits in known places */
1454 
1455 #define inuse_bit_at_offset(p, s)\
1456  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1457 
1458 #define set_inuse_bit_at_offset(p, s)\
1459  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1460 
1461 #define clear_inuse_bit_at_offset(p, s)\
1462  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1463 
1464 
1465 
1466 
1467 /*
1468   Dealing with size fields
1469 */
1470 
1471 /* Get size, ignoring use bits */
1472 
1473 #define chunksize(p)          ((p)->size & ~(SIZE_BITS))
1474 
1475 /* Set size at head, without disturbing its use bit */
1476 
1477 #define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1478 
1479 /* Set size/use ignoring previous bits in header */
1480 
1481 #define set_head(p, s)        ((p)->size = (s))
1482 
1483 /* Set size at footer (only when chunk is not in use) */
1484 
1485 #define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1486 
1487 
1488 
1489 
1490 
1491 /*
1492    Bins
1493 
1494     The bins, `av_' are an array of pairs of pointers serving as the
1495     heads of (initially empty) doubly-linked lists of chunks, laid out
1496     in a way so that each pair can be treated as if it were in a
1497     malloc_chunk. (This way, the fd/bk offsets for linking bin heads
1498     and chunks are the same).
1499 
1500     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1501     8 bytes apart. Larger bins are approximately logarithmically
1502     spaced. (See the table below.) The `av_' array is never mentioned
1503     directly in the code, but instead via bin access macros.
1504 
1505     Bin layout:
1506 
1507     64 bins of size       8
1508     32 bins of size      64
1509     16 bins of size     512
1510      8 bins of size    4096
1511      4 bins of size   32768
1512      2 bins of size  262144
1513      1 bin  of size what's left
1514 
1515     There is actually a little bit of slop in the numbers in bin_index
1516     for the sake of speed. This makes no difference elsewhere.
1517 
1518     The special chunks `top' and `last_remainder' get their own bins,
1519     (this is implemented via yet more trickery with the av_ array),
1520     although `top' is never properly linked to its bin since it is
1521     always handled specially.
1522 
1523 */
1524 
1525 #ifdef SEPARATE_OBJECTS
1526 #define av_ malloc_av_
1527 #endif
1528 
1529 #define NAV             128   /* number of bins */
1530 
1531 typedef struct malloc_chunk* mbinptr;
1532 
1533 /* access macros */
1534 
1535 #define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
1536 #define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
1537 #define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1538 
1539 /*
1540    The first 2 bins are never indexed. The corresponding av_ cells are instead
1541    used for bookkeeping. This is not to save space, but to simplify
1542    indexing, maintain locality, and avoid some initialization tests.
1543 */
1544 
1545 #define top            (bin_at(0)->fd)   /* The topmost chunk */
1546 #define last_remainder (bin_at(1))       /* remainder from last split */
1547 
1548 
1549 /*
1550    Because top initially points to its own bin with initial
1551    zero size, thus forcing extension on the first malloc request,
1552    we avoid having any special code in malloc to check whether
1553    it even exists yet. But we still need to in malloc_extend_top.
1554 */
1555 
1556 #define initial_top    ((mchunkptr)(bin_at(0)))
1557 
1558 /* Helper macro to initialize bins */
1559 
1560 #define IAV(i)  bin_at(i), bin_at(i)
1561 
1562 #ifdef DEFINE_MALLOC
1563 STATIC mbinptr av_[NAV * 2 + 2] = {
1564  0, 0,
1565  IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
1566  IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
1567  IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
1568  IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
1569  IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
1570  IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
1571  IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
1572  IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
1573  IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
1574  IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
1575  IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
1576  IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
1577  IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
1578  IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
1579  IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
1580  IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
1581 };
1582 #else
1583 extern mbinptr av_[NAV * 2 + 2];
1584 #endif
1585 
1586 
1587 
1588 /* field-extraction macros */
1589 
1590 #define first(b) ((b)->fd)
1591 #define last(b)  ((b)->bk)
1592 
1593 /*
1594   Indexing into bins
1595 */
1596 
1597 #define bin_index(sz)                                                          \
1598 (((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
1599  ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
1600  ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
1601  ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
1602  ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
1603  ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
1604                                           126)
1605 /*
1606   bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
1607   identically sized chunks. This is exploited in malloc.
1608 */
1609 
1610 #define MAX_SMALLBIN_SIZE   512
1611 #define SMALLBIN_WIDTH        8
1612 #define SMALLBIN_WIDTH_BITS   3
1613 #define MAX_SMALLBIN        (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
1614 
1615 #define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)
1616 
1617 /*
1618    Requests are `small' if both the corresponding and the next bin are small
1619 */
1620 
1621 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
1622 
1623 
1624 
1625 /*
1626     To help compensate for the large number of bins, a one-level index
1627     structure is used for bin-by-bin searching.  `binblocks' is a
1628     one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1629     have any (possibly) non-empty bins, so they can be skipped over
1630     all at once during during traversals. The bits are NOT always
1631     cleared as soon as all bins in a block are empty, but instead only
1632     when all are noticed to be empty during traversal in malloc.
1633 */
1634 
1635 #define BINBLOCKWIDTH     4   /* bins per block */
1636 
1637 #define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
1638 
1639 /* bin<->block macros */
1640 
1641 #define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
1642 #define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
1643 #define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
1644 
1645 
1646 
1647 
1648 
1649 /*  Other static bookkeeping data */
1650 
1651 #ifdef SEPARATE_OBJECTS
1652 #define trim_threshold		malloc_trim_threshold
1653 #define top_pad			malloc_top_pad
1654 #define n_mmaps_max		malloc_n_mmaps_max
1655 #define mmap_threshold		malloc_mmap_threshold
1656 #define sbrk_base		malloc_sbrk_base
1657 #define max_sbrked_mem		malloc_max_sbrked_mem
1658 #define max_total_mem		malloc_max_total_mem
1659 #define current_mallinfo	malloc_current_mallinfo
1660 #define n_mmaps			malloc_n_mmaps
1661 #define max_n_mmaps		malloc_max_n_mmaps
1662 #define mmapped_mem		malloc_mmapped_mem
1663 #define max_mmapped_mem		malloc_max_mmapped_mem
1664 #endif
1665 
1666 /* variables holding tunable values */
1667 
1668 #ifdef DEFINE_MALLOC
1669 
1670 STATIC unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
1671 STATIC unsigned long top_pad          = DEFAULT_TOP_PAD;
1672 #if HAVE_MMAP
1673 STATIC unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
1674 STATIC unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
1675 #endif
1676 
1677 /* The first value returned from sbrk */
1678 STATIC char* sbrk_base = (char*)(-1);
1679 
1680 /* The maximum memory obtained from system via sbrk */
1681 STATIC unsigned long max_sbrked_mem = 0;
1682 
1683 /* The maximum via either sbrk or mmap */
1684 STATIC unsigned long max_total_mem = 0;
1685 
1686 /* internal working copy of mallinfo */
1687 STATIC struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1688 
1689 #if HAVE_MMAP
1690 
1691 /* Tracking mmaps */
1692 
1693 STATIC unsigned int n_mmaps = 0;
1694 STATIC unsigned int max_n_mmaps = 0;
1695 STATIC unsigned long mmapped_mem = 0;
1696 STATIC unsigned long max_mmapped_mem = 0;
1697 
1698 #endif
1699 
1700 #else /* ! DEFINE_MALLOC */
1701 
1702 extern unsigned long trim_threshold;
1703 extern unsigned long top_pad;
1704 #if HAVE_MMAP
1705 extern unsigned int  n_mmaps_max;
1706 extern unsigned long mmap_threshold;
1707 #endif
1708 extern char* sbrk_base;
1709 extern unsigned long max_sbrked_mem;
1710 extern unsigned long max_total_mem;
1711 extern struct mallinfo current_mallinfo;
1712 #if HAVE_MMAP
1713 extern unsigned int n_mmaps;
1714 extern unsigned int max_n_mmaps;
1715 extern unsigned long mmapped_mem;
1716 extern unsigned long max_mmapped_mem;
1717 #endif
1718 
1719 #endif /* ! DEFINE_MALLOC */
1720 
1721 /* The total memory obtained from system via sbrk */
1722 #define sbrked_mem  (current_mallinfo.arena)
1723 
1724 
1725 
1726 /*
1727   Debugging support
1728 */
1729 
1730 #if DEBUG
1731 
1732 
1733 /*
1734   These routines make a number of assertions about the states
1735   of data structures that should be true at all times. If any
1736   are not true, it's very likely that a user program has somehow
1737   trashed memory. (It's also possible that there is a coding error
1738   in malloc. In which case, please report it!)
1739 */
1740 
1741 #if __STD_C
do_check_chunk(mchunkptr p)1742 static void do_check_chunk(mchunkptr p)
1743 #else
1744 static void do_check_chunk(p) mchunkptr p;
1745 #endif
1746 {
1747   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1748 
1749   /* No checkable chunk is mmapped */
1750   assert(!chunk_is_mmapped(p));
1751 
1752   /* Check for legal address ... */
1753   assert((char*)p >= sbrk_base);
1754   if (p != top)
1755     assert((char*)p + sz <= (char*)top);
1756   else
1757     assert((char*)p + sz <= sbrk_base + sbrked_mem);
1758 
1759 }
1760 
1761 
1762 #if __STD_C
do_check_free_chunk(mchunkptr p)1763 static void do_check_free_chunk(mchunkptr p)
1764 #else
1765 static void do_check_free_chunk(p) mchunkptr p;
1766 #endif
1767 {
1768   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1769   mchunkptr next = chunk_at_offset(p, sz);
1770 
1771   do_check_chunk(p);
1772 
1773   /* Check whether it claims to be free ... */
1774   assert(!inuse(p));
1775 
1776   /* Unless a special marker, must have OK fields */
1777   if ((long)sz >= (long)MINSIZE)
1778   {
1779     assert((sz & MALLOC_ALIGN_MASK) == 0);
1780     assert(aligned_OK(chunk2mem(p)));
1781     /* ... matching footer field */
1782     assert(next->prev_size == sz);
1783     /* ... and is fully consolidated */
1784     assert(prev_inuse(p));
1785     assert (next == top || inuse(next));
1786 
1787     /* ... and has minimally sane links */
1788     assert(p->fd->bk == p);
1789     assert(p->bk->fd == p);
1790   }
1791   else /* markers are always of size SIZE_SZ */
1792     assert(sz == SIZE_SZ);
1793 }
1794 
1795 #if __STD_C
do_check_inuse_chunk(mchunkptr p)1796 static void do_check_inuse_chunk(mchunkptr p)
1797 #else
1798 static void do_check_inuse_chunk(p) mchunkptr p;
1799 #endif
1800 {
1801   mchunkptr next = next_chunk(p);
1802   do_check_chunk(p);
1803 
1804   /* Check whether it claims to be in use ... */
1805   assert(inuse(p));
1806 
1807   /* ... and is surrounded by OK chunks.
1808     Since more things can be checked with free chunks than inuse ones,
1809     if an inuse chunk borders them and debug is on, it's worth doing them.
1810   */
1811   if (!prev_inuse(p))
1812   {
1813     mchunkptr prv = prev_chunk(p);
1814     assert(next_chunk(prv) == p);
1815     do_check_free_chunk(prv);
1816   }
1817   if (next == top)
1818   {
1819     assert(prev_inuse(next));
1820     assert(chunksize(next) >= MINSIZE);
1821   }
1822   else if (!inuse(next))
1823     do_check_free_chunk(next);
1824 
1825 }
1826 
1827 #if __STD_C
do_check_malloced_chunk(mchunkptr p,INTERNAL_SIZE_T s)1828 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
1829 #else
1830 static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
1831 #endif
1832 {
1833   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1834   long room = long_sub_size_t(sz, s);
1835 
1836   do_check_inuse_chunk(p);
1837 
1838   /* Legal size ... */
1839   assert((long)sz >= (long)MINSIZE);
1840   assert((sz & MALLOC_ALIGN_MASK) == 0);
1841   assert(room >= 0);
1842   assert(room < (long)MINSIZE);
1843 
1844   /* ... and alignment */
1845   assert(aligned_OK(chunk2mem(p)));
1846 
1847 
1848   /* ... and was allocated at front of an available chunk */
1849   assert(prev_inuse(p));
1850 
1851 }
1852 
1853 
1854 #define check_free_chunk(P)  do_check_free_chunk(P)
1855 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
1856 #define check_chunk(P) do_check_chunk(P)
1857 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
1858 #else
1859 #define check_free_chunk(P)
1860 #define check_inuse_chunk(P)
1861 #define check_chunk(P)
1862 #define check_malloced_chunk(P,N)
1863 #endif
1864 
1865 
1866 
1867 /*
1868   Macro-based internal utilities
1869 */
1870 
1871 
1872 /*
1873   Linking chunks in bin lists.
1874   Call these only with variables, not arbitrary expressions, as arguments.
1875 */
1876 
1877 /*
1878   Place chunk p of size s in its bin, in size order,
1879   putting it ahead of others of same size.
1880 */
1881 
1882 
1883 #define frontlink(P, S, IDX, BK, FD)                                          \
1884 {                                                                             \
1885   if (S < MAX_SMALLBIN_SIZE)                                                  \
1886   {                                                                           \
1887     IDX = smallbin_index(S);                                                  \
1888     mark_binblock(IDX);                                                       \
1889     BK = bin_at(IDX);                                                         \
1890     FD = BK->fd;                                                              \
1891     P->bk = BK;                                                               \
1892     P->fd = FD;                                                               \
1893     FD->bk = BK->fd = P;                                                      \
1894   }                                                                           \
1895   else                                                                        \
1896   {                                                                           \
1897     IDX = bin_index(S);                                                       \
1898     BK = bin_at(IDX);                                                         \
1899     FD = BK->fd;                                                              \
1900     if (FD == BK) mark_binblock(IDX);                                         \
1901     else                                                                      \
1902     {                                                                         \
1903       while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
1904       BK = FD->bk;                                                            \
1905     }                                                                         \
1906     P->bk = BK;                                                               \
1907     P->fd = FD;                                                               \
1908     FD->bk = BK->fd = P;                                                      \
1909   }                                                                           \
1910 }
1911 
1912 
1913 /* take a chunk off a list */
1914 
1915 #define unlink(P, BK, FD)                                                     \
1916 {                                                                             \
1917   BK = P->bk;                                                                 \
1918   FD = P->fd;                                                                 \
1919   FD->bk = BK;                                                        \
1920   BK->fd = FD;                                                        \
1921 }                                                                             \
1922 
1923 /* Place p as the last remainder */
1924 
1925 #define link_last_remainder(P)                                                \
1926 {                                                                             \
1927   last_remainder->fd = last_remainder->bk =  P;                               \
1928   P->fd = P->bk = last_remainder;                                             \
1929 }
1930 
1931 /* Clear the last_remainder bin */
1932 
1933 #define clear_last_remainder \
1934   (last_remainder->fd = last_remainder->bk = last_remainder)
1935 
1936 
1937 
1938 
1939 
1940 
1941 /* Routines dealing with mmap(). */
1942 
1943 #if HAVE_MMAP
1944 
1945 #ifdef DEFINE_MALLOC
1946 
1947 #if __STD_C
mmap_chunk(size_t size)1948 static mchunkptr mmap_chunk(size_t size)
1949 #else
1950 static mchunkptr mmap_chunk(size) size_t size;
1951 #endif
1952 {
1953   size_t page_mask = malloc_getpagesize - 1;
1954   mchunkptr p;
1955 
1956 #ifndef MAP_ANONYMOUS
1957   static int fd = -1;
1958 #endif
1959 
1960   if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
1961 
1962   /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
1963    * there is no following chunk whose prev_size field could be used.
1964    */
1965   size = (size + SIZE_SZ + page_mask) & ~page_mask;
1966 
1967 #ifdef MAP_ANONYMOUS
1968   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
1969 		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1970 #else /* !MAP_ANONYMOUS */
1971   if (fd < 0)
1972   {
1973     fd = open("/dev/zero", O_RDWR);
1974     if(fd < 0) return 0;
1975   }
1976   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
1977 #endif
1978 
1979   if(p == (mchunkptr)-1) return 0;
1980 
1981   n_mmaps++;
1982   if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
1983 
1984   /* We demand that eight bytes into a page must be 8-byte aligned. */
1985   assert(aligned_OK(chunk2mem(p)));
1986 
1987   /* The offset to the start of the mmapped region is stored
1988    * in the prev_size field of the chunk; normally it is zero,
1989    * but that can be changed in memalign().
1990    */
1991   p->prev_size = 0;
1992   set_head(p, size|IS_MMAPPED);
1993 
1994   mmapped_mem += size;
1995   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1996     max_mmapped_mem = mmapped_mem;
1997   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1998     max_total_mem = mmapped_mem + sbrked_mem;
1999   return p;
2000 }
2001 
2002 #endif /* DEFINE_MALLOC */
2003 
2004 #ifdef SEPARATE_OBJECTS
2005 #define munmap_chunk malloc_munmap_chunk
2006 #endif
2007 
2008 #ifdef DEFINE_FREE
2009 
2010 #if __STD_C
munmap_chunk(mchunkptr p)2011 STATIC void munmap_chunk(mchunkptr p)
2012 #else
2013 STATIC void munmap_chunk(p) mchunkptr p;
2014 #endif
2015 {
2016   INTERNAL_SIZE_T size = chunksize(p);
2017   int ret;
2018 
2019   assert (chunk_is_mmapped(p));
2020   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
2021   assert((n_mmaps > 0));
2022   assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
2023 
2024   n_mmaps--;
2025   mmapped_mem -= (size + p->prev_size);
2026 
2027   ret = munmap((char *)p - p->prev_size, size + p->prev_size);
2028 
2029   /* munmap returns non-zero on failure */
2030   assert(ret == 0);
2031 }
2032 
2033 #else /* ! DEFINE_FREE */
2034 
2035 #if __STD_C
2036 extern void munmap_chunk(mchunkptr);
2037 #else
2038 extern void munmap_chunk();
2039 #endif
2040 
2041 #endif /* ! DEFINE_FREE */
2042 
2043 #if HAVE_MREMAP
2044 
2045 #ifdef DEFINE_REALLOC
2046 
2047 #if __STD_C
mremap_chunk(mchunkptr p,size_t new_size)2048 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
2049 #else
2050 static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
2051 #endif
2052 {
2053   size_t page_mask = malloc_getpagesize - 1;
2054   INTERNAL_SIZE_T offset = p->prev_size;
2055   INTERNAL_SIZE_T size = chunksize(p);
2056   char *cp;
2057 
2058   assert (chunk_is_mmapped(p));
2059   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
2060   assert((n_mmaps > 0));
2061   assert(((size + offset) & (malloc_getpagesize-1)) == 0);
2062 
2063   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2064   new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2065 
2066   cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
2067 
2068   if (cp == (char *)-1) return 0;
2069 
2070   p = (mchunkptr)(cp + offset);
2071 
2072   assert(aligned_OK(chunk2mem(p)));
2073 
2074   assert((p->prev_size == offset));
2075   set_head(p, (new_size - offset)|IS_MMAPPED);
2076 
2077   mmapped_mem -= size + offset;
2078   mmapped_mem += new_size;
2079   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
2080     max_mmapped_mem = mmapped_mem;
2081   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2082     max_total_mem = mmapped_mem + sbrked_mem;
2083   return p;
2084 }
2085 
2086 #endif /* DEFINE_REALLOC */
2087 
2088 #endif /* HAVE_MREMAP */
2089 
2090 #endif /* HAVE_MMAP */
2091 
2092 
2093 
2094 
2095 #ifdef DEFINE_MALLOC
2096 
2097 /*
2098   Extend the top-most chunk by obtaining memory from system.
2099   Main interface to sbrk (but see also malloc_trim).
2100 */
2101 
2102 #if __STD_C
malloc_extend_top(INTERNAL_SIZE_T nb)2103 static void malloc_extend_top(INTERNAL_SIZE_T nb)
2104 #else
2105 static void malloc_extend_top(nb) RDECL INTERNAL_SIZE_T nb;
2106 #endif
2107 {
2108   char*     brk;                  /* return value from sbrk */
2109   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
2110   INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
2111   int correction_failed = 0;      /* whether we should relax the assertion */
2112   char*     new_brk;              /* return of 2nd sbrk call */
2113   INTERNAL_SIZE_T top_size;       /* new size of top chunk */
2114 
2115   mchunkptr old_top     = top;  /* Record state of old top */
2116   INTERNAL_SIZE_T old_top_size = chunksize(old_top);
2117   char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
2118 
2119   /* Pad request with top_pad plus minimal overhead */
2120 
2121   INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
2122   unsigned long pagesz    = malloc_getpagesize;
2123 
2124   /* If not the first time through, round to preserve page boundary */
2125   /* Otherwise, we need to correct to a page size below anyway. */
2126   /* (We also correct below if an intervening foreign sbrk call.) */
2127 
2128   if (sbrk_base != (char*)(-1))
2129     sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
2130 
2131   brk = (char*)(MORECORE (sbrk_size));
2132 
2133   /* Fail if sbrk failed or if a foreign sbrk call killed our space */
2134   if (brk == (char*)(MORECORE_FAILURE) ||
2135       (brk < old_end && old_top != initial_top))
2136     return;
2137 
2138   sbrked_mem += sbrk_size;
2139 
2140   if (brk == old_end /* can just add bytes to current top, unless
2141 			previous correction failed */
2142       && ((POINTER_UINT)old_end & (pagesz - 1)) == 0)
2143   {
2144     top_size = sbrk_size + old_top_size;
2145     set_head(top, top_size | PREV_INUSE);
2146   }
2147   else
2148   {
2149     if (sbrk_base == (char*)(-1))  /* First time through. Record base */
2150       sbrk_base = brk;
2151     else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
2152       sbrked_mem += brk - (char*)old_end;
2153 
2154     /* Guarantee alignment of first new chunk made from this space */
2155     front_misalign = (POINTER_UINT)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2156     if (front_misalign > 0)
2157     {
2158       correction = (MALLOC_ALIGNMENT) - front_misalign;
2159       brk += correction;
2160     }
2161     else
2162       correction = 0;
2163 
2164     /* Guarantee the next brk will be at a page boundary */
2165     correction += pagesz - ((POINTER_UINT)(brk + sbrk_size) & (pagesz - 1));
2166 
2167     /* To guarantee page boundary, correction should be less than pagesz */
2168     correction &= (pagesz - 1);
2169 
2170     /* Allocate correction */
2171     new_brk = (char*)(MORECORE (correction));
2172     if (new_brk == (char*)(MORECORE_FAILURE))
2173       {
2174 	correction = 0;
2175 	correction_failed = 1;
2176 	new_brk = brk + sbrk_size;
2177 	if (front_misalign > 0)
2178 	  new_brk -= (MALLOC_ALIGNMENT) - front_misalign;
2179       }
2180 
2181     sbrked_mem += correction;
2182 
2183     top = (mchunkptr)brk;
2184     top_size = new_brk - brk + correction;
2185     set_head(top, top_size | PREV_INUSE);
2186 
2187     if (old_top != initial_top)
2188     {
2189 
2190       /* There must have been an intervening foreign sbrk call. */
2191       /* A double fencepost is necessary to prevent consolidation */
2192 
2193       /* If not enough space to do this, then user did something very wrong */
2194       if (old_top_size < MINSIZE)
2195       {
2196         set_head(top, PREV_INUSE); /* will force null return from malloc */
2197         return;
2198       }
2199 
2200       /* Also keep size a multiple of MALLOC_ALIGNMENT */
2201       old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2202       set_head_size(old_top, old_top_size);
2203       chunk_at_offset(old_top, old_top_size          )->size =
2204         SIZE_SZ|PREV_INUSE;
2205       chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
2206         SIZE_SZ|PREV_INUSE;
2207       /* If possible, release the rest. */
2208       if (old_top_size >= MINSIZE)
2209         __malloc_free(chunk2mem(old_top));
2210     }
2211   }
2212 
2213   if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
2214     max_sbrked_mem = sbrked_mem;
2215 #if HAVE_MMAP
2216   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2217     max_total_mem = mmapped_mem + sbrked_mem;
2218 #else
2219   if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem)
2220     max_total_mem = sbrked_mem;
2221 #endif
2222 
2223   /* We always land on a page boundary */
2224   assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0
2225 	 || correction_failed);
2226   (void) correction_failed;
2227 }
2228 
2229 #endif /* DEFINE_MALLOC */
2230 
2231 
2232 /* Main public routines */
2233 
2234 #ifdef DEFINE_MALLOC
2235 
2236 /*
2237   Malloc Algorthim:
2238 
2239     The requested size is first converted into a usable form, `nb'.
2240     This currently means to add 4 bytes overhead plus possibly more to
2241     obtain 8-byte alignment and/or to obtain a size of at least
2242     MINSIZE (currently 16 bytes), the smallest allocatable size.
2243     (All fits are considered `exact' if they are within MINSIZE bytes.)
2244 
2245     From there, the first successful of the following steps is taken:
2246 
2247       1. The bin corresponding to the request size is scanned, and if
2248          a chunk of exactly the right size is found, it is taken.
2249 
2250       2. The most recently remaindered chunk is used if it is big
2251          enough.  This is a form of (roving) first fit, used only in
2252          the absence of exact fits. Runs of consecutive requests use
2253          the remainder of the chunk used for the previous such request
2254          whenever possible. This limited use of a first-fit style
2255          allocation strategy tends to give contiguous chunks
2256          coextensive lifetimes, which improves locality and can reduce
2257          fragmentation in the long run.
2258 
2259       3. Other bins are scanned in increasing size order, using a
2260          chunk big enough to fulfill the request, and splitting off
2261          any remainder.  This search is strictly by best-fit; i.e.,
2262          the smallest (with ties going to approximately the least
2263          recently used) chunk that fits is selected.
2264 
2265       4. If large enough, the chunk bordering the end of memory
2266          (`top') is split off. (This use of `top' is in accord with
2267          the best-fit search rule.  In effect, `top' is treated as
2268          larger (and thus less well fitting) than any other available
2269          chunk since it can be extended to be as large as necessary
2270          (up to system limitations).
2271 
2272       5. If the request size meets the mmap threshold and the
2273          system supports mmap, and there are few enough currently
2274          allocated mmapped regions, and a call to mmap succeeds,
2275          the request is allocated via direct memory mapping.
2276 
2277       6. Otherwise, the top of memory is extended by
2278          obtaining more space from the system (normally using sbrk,
2279          but definable to anything else via the MORECORE macro).
2280          Memory is gathered from the system (in system page-sized
2281          units) in a way that allows chunks obtained across different
2282          sbrk calls to be consolidated, but does not require
2283          contiguous memory. Thus, it should be safe to intersperse
2284          mallocs with other sbrk calls.
2285 
2286 
2287       All allocations are made from the the `lowest' part of any found
2288       chunk. (The implementation invariant is that prev_inuse is
2289       always true of any allocated chunk; i.e., that each allocated
2290       chunk borders either a previously allocated and still in-use chunk,
2291       or the base of its memory arena.)
2292 
2293 */
2294 
2295 #if __STD_C
mALLOc(size_t bytes)2296 Void_t* mALLOc(size_t bytes)
2297 #else
2298 Void_t* mALLOc(bytes) RDECL size_t bytes;
2299 #endif
2300 {
2301 #ifdef MALLOC_PROVIDED
2302 
2303   return malloc (bytes); // Make sure that the pointer returned by malloc is returned back.
2304 
2305 #else
2306 
2307   mchunkptr victim;                  /* inspected/selected chunk */
2308   INTERNAL_SIZE_T victim_size;       /* its size */
2309   int       idx;                     /* index for bin traversal */
2310   mbinptr   bin;                     /* associated bin */
2311   mchunkptr remainder;               /* remainder from a split */
2312   long      remainder_size;          /* its size */
2313   int       remainder_index;         /* its bin index */
2314   unsigned long block;               /* block traverser bit */
2315   int       startidx;                /* first bin of a traversed block */
2316   mchunkptr fwd;                     /* misc temp for linking */
2317   mchunkptr bck;                     /* misc temp for linking */
2318   mbinptr q;                         /* misc temp */
2319 
2320   INTERNAL_SIZE_T nb  = request2size(bytes);  /* padded request size; */
2321 
2322   /* Check for overflow and just fail, if so. */
2323   if (nb > INT_MAX || nb < bytes)
2324   {
2325     errno = ENOMEM;
2326     return 0;
2327   }
2328 
2329   MALLOC_LOCK;
2330 
2331   /* Check for exact match in a bin */
2332 
2333   if (is_small_request(nb))  /* Faster version for small requests */
2334   {
2335     idx = smallbin_index(nb);
2336 
2337     /* No traversal or size check necessary for small bins.  */
2338 
2339     q = bin_at(idx);
2340     victim = last(q);
2341 
2342 #if MALLOC_ALIGN != 16
2343     /* Also scan the next one, since it would have a remainder < MINSIZE */
2344     if (victim == q)
2345     {
2346       q = next_bin(q);
2347       victim = last(q);
2348     }
2349 #endif
2350     if (victim != q)
2351     {
2352       victim_size = chunksize(victim);
2353       unlink(victim, bck, fwd);
2354       set_inuse_bit_at_offset(victim, victim_size);
2355       check_malloced_chunk(victim, nb);
2356       MALLOC_UNLOCK;
2357       return chunk2mem(victim);
2358     }
2359 
2360     idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
2361 
2362   }
2363   else
2364   {
2365     idx = bin_index(nb);
2366     bin = bin_at(idx);
2367 
2368     for (victim = last(bin); victim != bin; victim = victim->bk)
2369     {
2370       victim_size = chunksize(victim);
2371       remainder_size = long_sub_size_t(victim_size, nb);
2372 
2373       if (remainder_size >= (long)MINSIZE) /* too big */
2374       {
2375         --idx; /* adjust to rescan below after checking last remainder */
2376         break;
2377       }
2378 
2379       else if (remainder_size >= 0) /* exact fit */
2380       {
2381         unlink(victim, bck, fwd);
2382         set_inuse_bit_at_offset(victim, victim_size);
2383         check_malloced_chunk(victim, nb);
2384 	MALLOC_UNLOCK;
2385         return chunk2mem(victim);
2386       }
2387     }
2388 
2389     ++idx;
2390 
2391   }
2392 
2393   /* Try to use the last split-off remainder */
2394 
2395   if ( (victim = last_remainder->fd) != last_remainder)
2396   {
2397     victim_size = chunksize(victim);
2398     remainder_size = long_sub_size_t(victim_size, nb);
2399 
2400     if (remainder_size >= (long)MINSIZE) /* re-split */
2401     {
2402       remainder = chunk_at_offset(victim, nb);
2403       set_head(victim, nb | PREV_INUSE);
2404       link_last_remainder(remainder);
2405       set_head(remainder, remainder_size | PREV_INUSE);
2406       set_foot(remainder, remainder_size);
2407       check_malloced_chunk(victim, nb);
2408       MALLOC_UNLOCK;
2409       return chunk2mem(victim);
2410     }
2411 
2412     clear_last_remainder;
2413 
2414     if (remainder_size >= 0)  /* exhaust */
2415     {
2416       set_inuse_bit_at_offset(victim, victim_size);
2417       check_malloced_chunk(victim, nb);
2418       MALLOC_UNLOCK;
2419       return chunk2mem(victim);
2420     }
2421 
2422     /* Else place in bin */
2423 
2424     frontlink(victim, victim_size, remainder_index, bck, fwd);
2425   }
2426 
2427   /*
2428      If there are any possibly nonempty big-enough blocks,
2429      search for best fitting chunk by scanning bins in blockwidth units.
2430   */
2431 
2432   if ( (block = idx2binblock(idx)) <= binblocks)
2433   {
2434 
2435     /* Get to the first marked block */
2436 
2437     if ( (block & binblocks) == 0)
2438     {
2439       /* force to an even block boundary */
2440       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
2441       block <<= 1;
2442       while ((block & binblocks) == 0)
2443       {
2444         idx += BINBLOCKWIDTH;
2445         block <<= 1;
2446       }
2447     }
2448 
2449     /* For each possibly nonempty block ... */
2450     for (;;)
2451     {
2452       startidx = idx;          /* (track incomplete blocks) */
2453       q = bin = bin_at(idx);
2454 
2455       /* For each bin in this block ... */
2456       do
2457       {
2458         /* Find and use first big enough chunk ... */
2459 
2460         for (victim = last(bin); victim != bin; victim = victim->bk)
2461         {
2462           victim_size = chunksize(victim);
2463           remainder_size = long_sub_size_t(victim_size, nb);
2464 
2465           if (remainder_size >= (long)MINSIZE) /* split */
2466           {
2467             remainder = chunk_at_offset(victim, nb);
2468             set_head(victim, nb | PREV_INUSE);
2469             unlink(victim, bck, fwd);
2470             link_last_remainder(remainder);
2471             set_head(remainder, remainder_size | PREV_INUSE);
2472             set_foot(remainder, remainder_size);
2473             check_malloced_chunk(victim, nb);
2474 	    MALLOC_UNLOCK;
2475             return chunk2mem(victim);
2476           }
2477 
2478           else if (remainder_size >= 0)  /* take */
2479           {
2480             set_inuse_bit_at_offset(victim, victim_size);
2481             unlink(victim, bck, fwd);
2482             check_malloced_chunk(victim, nb);
2483 	    MALLOC_UNLOCK;
2484             return chunk2mem(victim);
2485           }
2486 
2487         }
2488 
2489        bin = next_bin(bin);
2490 
2491 #if MALLOC_ALIGN == 16
2492        if (idx < MAX_SMALLBIN)
2493          {
2494            bin = next_bin(bin);
2495            ++idx;
2496          }
2497 #endif
2498       } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
2499 
2500       /* Clear out the block bit. */
2501 
2502       do   /* Possibly backtrack to try to clear a partial block */
2503       {
2504         if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
2505         {
2506           binblocks &= ~block;
2507           break;
2508         }
2509         --startidx;
2510        q = prev_bin(q);
2511       } while (first(q) == q);
2512 
2513       /* Get to the next possibly nonempty block */
2514 
2515       if ( (block <<= 1) <= binblocks && (block != 0) )
2516       {
2517         while ((block & binblocks) == 0)
2518         {
2519           idx += BINBLOCKWIDTH;
2520           block <<= 1;
2521         }
2522       }
2523       else
2524         break;
2525     }
2526   }
2527 
2528 
2529   /* Try to use top chunk */
2530 
2531   /* Require that there be a remainder, ensuring top always exists  */
2532   remainder_size = long_sub_size_t(chunksize(top), nb);
2533   if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2534   {
2535 
2536 #if HAVE_MMAP
2537     /* If big and would otherwise need to extend, try to use mmap instead */
2538     if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
2539         (victim = mmap_chunk(nb)) != 0)
2540     {
2541       MALLOC_UNLOCK;
2542       return chunk2mem(victim);
2543     }
2544 #endif
2545 
2546     /* Try to extend */
2547     malloc_extend_top(nb);
2548     remainder_size = long_sub_size_t(chunksize(top), nb);
2549     if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2550     {
2551       MALLOC_UNLOCK;
2552       return 0; /* propagate failure */
2553     }
2554   }
2555 
2556   victim = top;
2557   set_head(victim, nb | PREV_INUSE);
2558   top = chunk_at_offset(victim, nb);
2559   set_head(top, remainder_size | PREV_INUSE);
2560   check_malloced_chunk(victim, nb);
2561   MALLOC_UNLOCK;
2562   return chunk2mem(victim);
2563 
2564 #endif /* MALLOC_PROVIDED */
2565 }
2566 
2567 #endif /* DEFINE_MALLOC */
2568 
2569 #ifdef DEFINE_FREE
2570 
2571 /*
2572 
2573   free() algorithm :
2574 
2575     cases:
2576 
2577        1. free(0) has no effect.
2578 
2579        2. If the chunk was allocated via mmap, it is release via munmap().
2580 
2581        3. If a returned chunk borders the current high end of memory,
2582           it is consolidated into the top, and if the total unused
2583           topmost memory exceeds the trim threshold, malloc_trim is
2584           called.
2585 
2586        4. Other chunks are consolidated as they arrive, and
2587           placed in corresponding bins. (This includes the case of
2588           consolidating with the current `last_remainder').
2589 
2590 */
2591 
2592 
2593 #if __STD_C
fREe(Void_t * mem)2594 void fREe(Void_t* mem)
2595 #else
2596 void fREe(mem) RDECL Void_t* mem;
2597 #endif
2598 {
2599 #ifdef MALLOC_PROVIDED
2600 
2601   free (mem);
2602 
2603 #else
2604 
2605   mchunkptr p;         /* chunk corresponding to mem */
2606   INTERNAL_SIZE_T hd;  /* its head field */
2607   INTERNAL_SIZE_T sz;  /* its size */
2608   int       idx;       /* its bin index */
2609   mchunkptr next;      /* next contiguous chunk */
2610   INTERNAL_SIZE_T nextsz; /* its size */
2611   INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
2612   mchunkptr bck;       /* misc temp for linking */
2613   mchunkptr fwd;       /* misc temp for linking */
2614   int       islr;      /* track whether merging with last_remainder */
2615 
2616   if (mem == 0)                              /* free(0) has no effect */
2617     return;
2618 
2619   MALLOC_LOCK;
2620 
2621   p = mem2chunk(mem);
2622   hd = p->size;
2623 
2624 #if HAVE_MMAP
2625   if (hd & IS_MMAPPED)                       /* release mmapped memory. */
2626   {
2627     munmap_chunk(p);
2628     MALLOC_UNLOCK;
2629     return;
2630   }
2631 #endif
2632 
2633   check_inuse_chunk(p);
2634 
2635   sz = hd & ~PREV_INUSE;
2636   next = chunk_at_offset(p, sz);
2637   nextsz = chunksize(next);
2638 
2639   if (next == top)                            /* merge with top */
2640   {
2641     sz += nextsz;
2642 
2643     if (!(hd & PREV_INUSE))                    /* consolidate backward */
2644     {
2645       prevsz = p->prev_size;
2646       p = chunk_at_offset(p, -prevsz);
2647       sz += prevsz;
2648       unlink(p, bck, fwd);
2649     }
2650 
2651     set_head(p, sz | PREV_INUSE);
2652     top = p;
2653     if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
2654       malloc_trim(top_pad);
2655     MALLOC_UNLOCK;
2656     return;
2657   }
2658 
2659   set_head(next, nextsz);                    /* clear inuse bit */
2660 
2661   islr = 0;
2662 
2663   if (!(hd & PREV_INUSE))                    /* consolidate backward */
2664   {
2665     prevsz = p->prev_size;
2666     p = chunk_at_offset(p, -prevsz);
2667     sz += prevsz;
2668 
2669     if (p->fd == last_remainder)             /* keep as last_remainder */
2670       islr = 1;
2671     else
2672       unlink(p, bck, fwd);
2673   }
2674 
2675   if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
2676   {
2677     sz += nextsz;
2678 
2679     if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
2680     {
2681       islr = 1;
2682       link_last_remainder(p);
2683     }
2684     else
2685       unlink(next, bck, fwd);
2686   }
2687 
2688 
2689   set_head(p, sz | PREV_INUSE);
2690   set_foot(p, sz);
2691   if (!islr)
2692     frontlink(p, sz, idx, bck, fwd);
2693 
2694   MALLOC_UNLOCK;
2695 
2696 #endif /* MALLOC_PROVIDED */
2697 }
2698 #ifdef _HAVE_ALIAS_ATTRIBUTE
2699 #pragma GCC diagnostic push
2700 #ifndef __clang__
2701 #pragma GCC diagnostic ignored "-Wmissing-attributes"
2702 #endif
2703 __strong_reference(free, __malloc_free);
2704 #pragma GCC diagnostic pop
2705 #endif
2706 #endif /* DEFINE_FREE */
2707 
2708 #ifdef DEFINE_REALLOC
2709 
2710 /*
2711 
2712   Realloc algorithm:
2713 
2714     Chunks that were obtained via mmap cannot be extended or shrunk
2715     unless HAVE_MREMAP is defined, in which case mremap is used.
2716     Otherwise, if their reallocation is for additional space, they are
2717     copied.  If for less, they are just left alone.
2718 
2719     Otherwise, if the reallocation is for additional space, and the
2720     chunk can be extended, it is, else a malloc-copy-free sequence is
2721     taken.  There are several different ways that a chunk could be
2722     extended. All are tried:
2723 
2724        * Extending forward into following adjacent free chunk.
2725        * Shifting backwards, joining preceding adjacent space
2726        * Both shifting backwards and extending forward.
2727        * Extending into newly sbrked space
2728 
2729     Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
2730     size argument of zero (re)allocates a minimum-sized chunk.
2731 
2732     If the reallocation is for less space, and the new request is for
2733     a `small' (<512 bytes) size, then the newly unused space is lopped
2734     off and freed.
2735 
2736     The old unix realloc convention of allowing the last-free'd chunk
2737     to be used as an argument to realloc is no longer supported.
2738     I don't know of any programs still relying on this feature,
2739     and allowing it would also allow too many other incorrect
2740     usages of realloc to be sensible.
2741 
2742 
2743 */
2744 
2745 
2746 #if __STD_C
rEALLOc(Void_t * oldmem,size_t bytes)2747 Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
2748 #else
2749 Void_t* rEALLOc(oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
2750 #endif
2751 {
2752 #ifdef MALLOC_PROVIDED
2753 
2754   realloc (oldmem, bytes);
2755 
2756 #else
2757 
2758   INTERNAL_SIZE_T    nb;      /* padded request size */
2759 
2760   mchunkptr oldp;             /* chunk corresponding to oldmem */
2761   INTERNAL_SIZE_T    oldsize; /* its size */
2762 
2763   mchunkptr newp;             /* chunk to return */
2764   INTERNAL_SIZE_T    newsize; /* its size */
2765   Void_t*   newmem;           /* corresponding user mem */
2766 
2767   mchunkptr next;             /* next contiguous chunk after oldp */
2768   INTERNAL_SIZE_T  nextsize;  /* its size */
2769 
2770   mchunkptr prev;             /* previous contiguous chunk before oldp */
2771   INTERNAL_SIZE_T  prevsize;  /* its size */
2772 
2773   mchunkptr remainder;        /* holds split off extra space from newp */
2774   INTERNAL_SIZE_T  remainder_size;   /* its size */
2775 
2776   mchunkptr bck;              /* misc temp for linking */
2777   mchunkptr fwd;              /* misc temp for linking */
2778 
2779 #ifdef REALLOC_ZERO_BYTES_FREES
2780   if (bytes == 0) { fREe(oldmem); return 0; }
2781 #endif
2782 
2783 
2784   /* realloc of null is supposed to be same as malloc */
2785   if (oldmem == 0) return mALLOc(bytes);
2786 
2787   MALLOC_LOCK;
2788 
2789   newp    = oldp    = mem2chunk(oldmem);
2790   newsize = oldsize = chunksize(oldp);
2791 
2792 
2793   nb = request2size(bytes);
2794 
2795   /* Check for overflow and just fail, if so. */
2796   if (nb > INT_MAX || nb < bytes)
2797   {
2798     errno = ENOMEM;
2799     return 0;
2800   }
2801 
2802 #if HAVE_MMAP
2803   if (chunk_is_mmapped(oldp))
2804   {
2805 #if HAVE_MREMAP
2806     newp = mremap_chunk(oldp, nb);
2807     if(newp)
2808     {
2809       MALLOC_UNLOCK;
2810       return chunk2mem(newp);
2811     }
2812 #endif
2813     /* Note the extra SIZE_SZ overhead. */
2814     if(oldsize - SIZE_SZ >= nb)
2815     {
2816       MALLOC_UNLOCK;
2817       return oldmem; /* do nothing */
2818     }
2819     /* Must alloc, copy, free. */
2820     newmem = mALLOc(bytes);
2821     if (newmem == 0)
2822     {
2823       MALLOC_UNLOCK;
2824       return 0; /* propagate failure */
2825     }
2826     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2827     munmap_chunk(oldp);
2828     MALLOC_UNLOCK;
2829     return newmem;
2830   }
2831 #endif
2832 
2833   check_inuse_chunk(oldp);
2834 
2835   if ((long)(oldsize) < (long)(nb))
2836   {
2837 
2838     /* Try expanding forward */
2839 
2840     next = chunk_at_offset(oldp, oldsize);
2841     if (next == top || !inuse(next))
2842     {
2843       nextsize = chunksize(next);
2844 
2845       /* Forward into top only if a remainder */
2846       if (next == top)
2847       {
2848         if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
2849         {
2850           newsize += nextsize;
2851           top = chunk_at_offset(oldp, nb);
2852           set_head(top, (newsize - nb) | PREV_INUSE);
2853           set_head_size(oldp, nb);
2854 	  MALLOC_UNLOCK;
2855           return chunk2mem(oldp);
2856         }
2857       }
2858 
2859       /* Forward into next chunk */
2860       else if (((long)(nextsize + newsize) >= (long)(nb)))
2861       {
2862         unlink(next, bck, fwd);
2863         newsize  += nextsize;
2864         goto split;
2865       }
2866     }
2867     else
2868     {
2869       next = 0;
2870       nextsize = 0;
2871     }
2872 
2873     /* Try shifting backwards. */
2874 
2875     if (!prev_inuse(oldp))
2876     {
2877       prev = prev_chunk(oldp);
2878       prevsize = chunksize(prev);
2879 
2880       /* try forward + backward first to save a later consolidation */
2881 
2882       if (next != 0)
2883       {
2884         /* into top */
2885         if (next == top)
2886         {
2887           if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
2888           {
2889             unlink(prev, bck, fwd);
2890             newp = prev;
2891             newsize += prevsize + nextsize;
2892             newmem = chunk2mem(newp);
2893             MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2894             top = chunk_at_offset(newp, nb);
2895             set_head(top, (newsize - nb) | PREV_INUSE);
2896             set_head_size(newp, nb);
2897 	    MALLOC_UNLOCK;
2898             return newmem;
2899           }
2900         }
2901 
2902         /* into next chunk */
2903         else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
2904         {
2905           unlink(next, bck, fwd);
2906           unlink(prev, bck, fwd);
2907           newp = prev;
2908           newsize += nextsize + prevsize;
2909           newmem = chunk2mem(newp);
2910           MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2911           goto split;
2912         }
2913       }
2914 
2915       /* backward only */
2916       if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
2917       {
2918         unlink(prev, bck, fwd);
2919         newp = prev;
2920         newsize += prevsize;
2921         newmem = chunk2mem(newp);
2922         MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2923         goto split;
2924       }
2925     }
2926 
2927     /* Must allocate */
2928 
2929     newmem = mALLOc (bytes);
2930 
2931     if (newmem == 0)  /* propagate failure */
2932     {
2933       MALLOC_UNLOCK;
2934       return 0;
2935     }
2936 
2937     /* Avoid copy if newp is next chunk after oldp. */
2938     /* (This can only happen when new chunk is sbrk'ed.) */
2939 
2940     if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
2941     {
2942       newsize += chunksize(newp);
2943       newp = oldp;
2944       goto split;
2945     }
2946 
2947     /* Otherwise copy, free, and exit */
2948     MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2949     __malloc_free(oldmem);
2950     MALLOC_UNLOCK;
2951     return newmem;
2952   }
2953 
2954 
2955  split:  /* split off extra room in old or expanded chunk */
2956 
2957   remainder_size = long_sub_size_t(newsize, nb);
2958 
2959   if (remainder_size >= (long)MINSIZE) /* split off remainder */
2960   {
2961     remainder = chunk_at_offset(newp, nb);
2962     set_head_size(newp, nb);
2963     set_head(remainder, remainder_size | PREV_INUSE);
2964     set_inuse_bit_at_offset(remainder, remainder_size);
2965     __malloc_free(chunk2mem(remainder)); /* let free() deal with it */
2966   }
2967   else
2968   {
2969     set_head_size(newp, newsize);
2970     set_inuse_bit_at_offset(newp, newsize);
2971   }
2972 
2973   check_inuse_chunk(newp);
2974   MALLOC_UNLOCK;
2975   return chunk2mem(newp);
2976 
2977 #endif /* MALLOC_PROVIDED */
2978 }
2979 
2980 #endif /* DEFINE_REALLOC */
2981 
2982 #ifdef DEFINE_MEMALIGN
2983 
2984 /*
2985 
2986   memalign algorithm:
2987 
2988     memalign requests more than enough space from malloc, finds a spot
2989     within that chunk that meets the alignment request, and then
2990     possibly frees the leading and trailing space.
2991 
2992     The alignment argument must be a power of two. This property is not
2993     checked by memalign, so misuse may result in random runtime errors.
2994 
2995     8-byte alignment is guaranteed by normal malloc calls, so don't
2996     bother calling memalign with an argument of 8 or less.
2997 
2998     Overreliance on memalign is a sure way to fragment space.
2999 
3000 */
3001 
3002 
3003 #if __STD_C
mEMALIGn(size_t alignment,size_t bytes)3004 Void_t* mEMALIGn(size_t alignment, size_t bytes)
3005 #else
3006 Void_t* mEMALIGn(alignment, bytes) RDECL size_t alignment; size_t bytes;
3007 #endif
3008 {
3009   INTERNAL_SIZE_T    nb;      /* padded  request size */
3010   char*     m;                /* memory returned by malloc call */
3011   mchunkptr p;                /* corresponding chunk */
3012   char*     brk;              /* alignment point within p */
3013   mchunkptr newp;             /* chunk to return */
3014   INTERNAL_SIZE_T  newsize;   /* its size */
3015   INTERNAL_SIZE_T  leadsize;  /* leading space befor alignment point */
3016   mchunkptr remainder;        /* spare room at end to split off */
3017   long      remainder_size;   /* its size */
3018 
3019   /* If need less alignment than we give anyway, just relay to malloc */
3020 
3021   if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
3022 
3023   /* Otherwise, ensure that it is at least a minimum chunk size */
3024 
3025   if (alignment <  MINSIZE) alignment = MINSIZE;
3026 
3027   /* Call malloc with worst case padding to hit alignment. */
3028 
3029   nb = request2size(bytes);
3030 
3031   /* Check for overflow. */
3032   if (nb > __SIZE_MAX__ - (alignment + MINSIZE) || nb < bytes)
3033   {
3034     errno = ENOMEM;
3035     return 0;
3036   }
3037 
3038   m  = (char*)(mALLOc(nb + alignment + MINSIZE));
3039 
3040   if (m == 0) return 0; /* propagate failure */
3041 
3042   MALLOC_LOCK;
3043 
3044   p = mem2chunk(m);
3045 
3046   if ((((uintptr_t)(m)) % alignment) == 0) /* aligned */
3047   {
3048 #if HAVE_MMAP
3049     if(chunk_is_mmapped(p))
3050     {
3051       MALLOC_UNLOCK;
3052       return chunk2mem(p); /* nothing more to do */
3053     }
3054 #endif
3055   }
3056   else /* misaligned */
3057   {
3058     /*
3059       Find an aligned spot inside chunk.
3060       Since we need to give back leading space in a chunk of at
3061       least MINSIZE, if the first calculation places us at
3062       a spot with less than MINSIZE leader, we can move to the
3063       next aligned spot -- we've allocated enough total room so that
3064       this is always possible.
3065     */
3066 
3067     brk = (char*)mem2chunk(((uintptr_t)(m + alignment - 1)) & -alignment);
3068     if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment;
3069 
3070     newp = (mchunkptr)brk;
3071     leadsize = brk - (char*)(p);
3072     newsize = chunksize(p) - leadsize;
3073 
3074 #if HAVE_MMAP
3075     if(chunk_is_mmapped(p))
3076     {
3077       newp->prev_size = p->prev_size + leadsize;
3078       set_head(newp, newsize|IS_MMAPPED);
3079       MALLOC_UNLOCK;
3080       return chunk2mem(newp);
3081     }
3082 #endif
3083 
3084     /* give back leader, use the rest */
3085 
3086     set_head(newp, newsize | PREV_INUSE);
3087     set_inuse_bit_at_offset(newp, newsize);
3088     set_head_size(p, leadsize);
3089     __malloc_free(chunk2mem(p));
3090     p = newp;
3091 
3092     assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
3093   }
3094 
3095   /* Also give back spare room at the end */
3096 
3097   remainder_size = long_sub_size_t(chunksize(p), nb);
3098 
3099   if (remainder_size >= (long)MINSIZE)
3100   {
3101     remainder = chunk_at_offset(p, nb);
3102     set_head(remainder, remainder_size | PREV_INUSE);
3103     set_head_size(p, nb);
3104     __malloc_free(chunk2mem(remainder));
3105   }
3106 
3107   check_inuse_chunk(p);
3108   MALLOC_UNLOCK;
3109   return chunk2mem(p);
3110 
3111 }
3112 
3113 #ifdef _HAVE_ALIAS_ATTRIBUTE
3114 __strong_reference(memalign, aligned_alloc);
3115 #endif
3116 #endif /* DEFINE_MEMALIGN */
3117 
3118 #ifdef DEFINE_VALLOC
3119 
3120 /*
3121     valloc just invokes memalign with alignment argument equal
3122     to the page size of the system (or as near to this as can
3123     be figured out from all the includes/defines above.)
3124 */
3125 
3126 #if __STD_C
vALLOc(size_t bytes)3127 Void_t* vALLOc(size_t bytes)
3128 #else
3129 Void_t* vALLOc(bytes) RDECL size_t bytes;
3130 #endif
3131 {
3132   return mEMALIGn (malloc_getpagesize, bytes);
3133 }
3134 
3135 #endif /* DEFINE_VALLOC */
3136 
3137 #ifdef DEFINE_PVALLOC
3138 
3139 /*
3140   pvalloc just invokes valloc for the nearest pagesize
3141   that will accommodate request
3142 */
3143 
3144 
3145 #if __STD_C
pvALLOc(size_t bytes)3146 Void_t* pvALLOc(size_t bytes)
3147 #else
3148 Void_t* pvALLOc(bytes) RDECL size_t bytes;
3149 #endif
3150 {
3151   size_t pagesize = malloc_getpagesize;
3152   if (bytes > __SIZE_MAX__ - pagesize)
3153   {
3154     errno = ENOMEM;
3155     return 0;
3156   }
3157   return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
3158 }
3159 
3160 #endif /* DEFINE_PVALLOC */
3161 
3162 #ifdef DEFINE_CALLOC
3163 #include "mul_overflow.h"
3164 /*
3165 
3166   calloc calls malloc, then zeroes out the allocated chunk.
3167 
3168 */
3169 
3170 #if __STD_C
cALLOc(size_t n,size_t elem_size)3171 Void_t* cALLOc(size_t n, size_t elem_size)
3172 #else
3173 Void_t* cALLOc(n, elem_size) RDECL size_t n; size_t elem_size;
3174 #endif
3175 {
3176   mchunkptr p;
3177   INTERNAL_SIZE_T csz;
3178 
3179   INTERNAL_SIZE_T sz;
3180 
3181 #if MORECORE_CLEARS
3182   mchunkptr oldtop;
3183   INTERNAL_SIZE_T oldtopsize;
3184 #endif
3185   Void_t* mem;
3186 
3187   if (mul_overflow((INTERNAL_SIZE_T) n, (INTERNAL_SIZE_T) elem_size, &sz))
3188   {
3189     errno = ENOMEM;
3190     return 0;
3191   }
3192 
3193   /* check if expand_top called, in which case don't need to clear */
3194 #if MORECORE_CLEARS
3195   MALLOC_LOCK;
3196   oldtop = top;
3197   oldtopsize = chunksize(top);
3198 #endif
3199 
3200   mem = mALLOc (sz);
3201 
3202   if (mem == 0)
3203   {
3204 #if MORECORE_CLEARS
3205     MALLOC_UNLOCK;
3206 #endif
3207     return 0;
3208   }
3209   else
3210   {
3211     p = mem2chunk(mem);
3212 
3213     /* Two optional cases in which clearing not necessary */
3214 
3215 
3216 #if HAVE_MMAP
3217     if (chunk_is_mmapped(p))
3218     {
3219 #if MORECORE_CLEARS
3220       MALLOC_UNLOCK;
3221 #endif
3222       return mem;
3223     }
3224 #endif
3225 
3226     csz = chunksize(p);
3227 
3228 #if MORECORE_CLEARS
3229     if (p == oldtop && csz > oldtopsize)
3230     {
3231       /* clear only the bytes from non-freshly-sbrked memory */
3232       csz = oldtopsize;
3233     }
3234     MALLOC_UNLOCK;
3235 #endif
3236 
3237     MALLOC_ZERO(mem, csz - SIZE_SZ);
3238     return mem;
3239   }
3240 }
3241 
3242 #endif /* DEFINE_CALLOC */
3243 
3244 #if defined(DEFINE_CFREE) && !defined(__CYGWIN__)
3245 
3246 /*
3247 
3248   cfree just calls free. It is needed/defined on some systems
3249   that pair it with calloc, presumably for odd historical reasons.
3250 
3251 */
3252 
3253 #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
3254 #if !defined(_LIBC) || !defined(_REENT_ONLY)
3255 #if __STD_C
cfree(Void_t * mem)3256 void cfree(Void_t *mem)
3257 #else
3258 void cfree(mem) Void_t *mem;
3259 #endif
3260 {
3261   fREe(mem);
3262 }
3263 #endif
3264 #endif
3265 
3266 #endif /* DEFINE_CFREE */
3267 
3268 #ifdef DEFINE_FREE
3269 
3270 /*
3271 
3272     Malloc_trim gives memory back to the system (via negative
3273     arguments to sbrk) if there is unused memory at the `high' end of
3274     the malloc pool. You can call this after freeing large blocks of
3275     memory to potentially reduce the system-level memory requirements
3276     of a program. However, it cannot guarantee to reduce memory. Under
3277     some allocation patterns, some large free blocks of memory will be
3278     locked between two used chunks, so they cannot be given back to
3279     the system.
3280 
3281     The `pad' argument to malloc_trim represents the amount of free
3282     trailing space to leave untrimmed. If this argument is zero,
3283     only the minimum amount of memory to maintain internal data
3284     structures will be left (one page or less). Non-zero arguments
3285     can be supplied to maintain enough trailing space to service
3286     future expected allocations without having to re-obtain memory
3287     from the system.
3288 
3289     Malloc_trim returns 1 if it actually released any memory, else 0.
3290 
3291 */
3292 
3293 #if __STD_C
malloc_trim(size_t pad)3294 int malloc_trim(size_t pad)
3295 #else
3296 int malloc_trim(pad) RDECL size_t pad;
3297 #endif
3298 {
3299   long  top_size;        /* Amount of top-most memory */
3300   long  extra;           /* Amount to release */
3301   char* current_brk;     /* address returned by pre-check sbrk call */
3302   char* new_brk;         /* address returned by negative sbrk call */
3303 
3304   unsigned long pagesz = malloc_getpagesize;
3305 
3306   MALLOC_LOCK;
3307 
3308   top_size = chunksize(top);
3309   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3310 
3311   if (extra < (long)pagesz)  /* Not enough memory to release */
3312   {
3313     MALLOC_UNLOCK;
3314     return 0;
3315   }
3316 
3317   else
3318   {
3319     /* Test to make sure no one else called sbrk */
3320     current_brk = (char*)(MORECORE (0));
3321     if (current_brk != (char*)(top) + top_size)
3322     {
3323       MALLOC_UNLOCK;
3324       return 0;     /* Apparently we don't own memory; must fail */
3325     }
3326 
3327     else
3328     {
3329       new_brk = (char*)(MORECORE (-extra));
3330 
3331       if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
3332       {
3333         /* Try to figure out what we have */
3334         current_brk = (char*)(MORECORE (0));
3335         top_size = current_brk - (char*)top;
3336         if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
3337         {
3338           sbrked_mem = current_brk - sbrk_base;
3339           set_head(top, top_size | PREV_INUSE);
3340         }
3341         check_chunk(top);
3342 	MALLOC_UNLOCK;
3343         return 0;
3344       }
3345 
3346       else
3347       {
3348         /* Success. Adjust top accordingly. */
3349         set_head(top, (top_size - extra) | PREV_INUSE);
3350         sbrked_mem -= extra;
3351         check_chunk(top);
3352 	MALLOC_UNLOCK;
3353         return 1;
3354       }
3355     }
3356   }
3357 }
3358 
3359 #endif /* DEFINE_FREE */
3360 
3361 #ifdef DEFINE_MALLOC_USABLE_SIZE
3362 
3363 /*
3364   malloc_usable_size:
3365 
3366     This routine tells you how many bytes you can actually use in an
3367     allocated chunk, which may be more than you requested (although
3368     often not). You can use this many bytes without worrying about
3369     overwriting other allocated objects. Not a particularly great
3370     programming practice, but still sometimes useful.
3371 
3372 */
3373 
3374 #if __STD_C
malloc_usable_size(Void_t * mem)3375 size_t malloc_usable_size(Void_t* mem)
3376 #else
3377 size_t malloc_usable_size(mem) RDECL Void_t* mem;
3378 #endif
3379 {
3380   mchunkptr p;
3381   if (mem == 0)
3382     return 0;
3383   else
3384   {
3385     p = mem2chunk(mem);
3386     if(!chunk_is_mmapped(p))
3387     {
3388       if (!inuse(p)) return 0;
3389 #if DEBUG
3390       MALLOC_LOCK;
3391       check_inuse_chunk(p);
3392       MALLOC_UNLOCK;
3393 #endif
3394       return chunksize(p) - SIZE_SZ;
3395     }
3396     return chunksize(p) - 2*SIZE_SZ;
3397   }
3398 }
3399 
3400 #endif /* DEFINE_MALLOC_USABLE_SIZE */
3401 
3402 #ifdef DEFINE_MALLINFO
3403 
3404 /* Utility to update current_mallinfo for malloc_stats and mallinfo() */
3405 
malloc_update_mallinfo(void)3406 STATIC void malloc_update_mallinfo(void)
3407 {
3408   int i;
3409   mbinptr b;
3410   mchunkptr p;
3411 #if DEBUG
3412   mchunkptr q;
3413 #endif
3414 
3415   INTERNAL_SIZE_T avail = chunksize(top);
3416   int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
3417 
3418   for (i = 1; i < NAV; ++i)
3419   {
3420     b = bin_at(i);
3421     for (p = last(b); p != b; p = p->bk)
3422     {
3423 #if DEBUG
3424       check_free_chunk(p);
3425       for (q = next_chunk(p);
3426            q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
3427            q = next_chunk(q))
3428         check_inuse_chunk(q);
3429 #endif
3430       avail += chunksize(p);
3431       navail++;
3432     }
3433   }
3434 
3435   current_mallinfo.ordblks = navail;
3436   current_mallinfo.uordblks = sbrked_mem - avail;
3437   current_mallinfo.fordblks = avail;
3438 #if HAVE_MMAP
3439   current_mallinfo.hblks = n_mmaps;
3440   current_mallinfo.hblkhd = mmapped_mem;
3441 #endif
3442   current_mallinfo.keepcost = chunksize(top);
3443 
3444 }
3445 
3446 #else /* ! DEFINE_MALLINFO */
3447 
3448 #if __STD_C
3449 extern void malloc_update_mallinfo(void);
3450 #else
3451 extern void malloc_update_mallinfo();
3452 #endif
3453 
3454 #endif /* ! DEFINE_MALLINFO */
3455 
3456 #ifdef DEFINE_MALLOC_STATS
3457 
3458 /*
3459 
3460   malloc_stats:
3461 
3462     Prints on stderr the amount of space obtain from the system (both
3463     via sbrk and mmap), the maximum amount (which may be more than
3464     current if malloc_trim and/or munmap got called), the maximum
3465     number of simultaneous mmap regions used, and the current number
3466     of bytes allocated via malloc (or realloc, etc) but not yet
3467     freed. (Note that this is the number of bytes allocated, not the
3468     number requested. It will be larger than the number requested
3469     because of alignment and bookkeeping overhead.)
3470 
3471 */
3472 
3473 #if __STD_C
malloc_stats(void)3474 void malloc_stats(void)
3475 #else
3476 void malloc_stats() RDECL
3477 #endif
3478 {
3479   unsigned long local_max_total_mem;
3480   int local_sbrked_mem;
3481   struct mallinfo local_mallinfo;
3482 #if HAVE_MMAP
3483   unsigned long local_mmapped_mem, local_max_n_mmaps;
3484 #endif
3485   FILE *fp;
3486 
3487   MALLOC_LOCK;
3488   malloc_update_mallinfo();
3489   local_max_total_mem = max_total_mem;
3490   local_sbrked_mem = sbrked_mem;
3491   local_mallinfo = current_mallinfo;
3492 #if HAVE_MMAP
3493   local_mmapped_mem = mmapped_mem;
3494   local_max_n_mmaps = max_n_mmaps;
3495 #endif
3496   MALLOC_UNLOCK;
3497 
3498   fp = stderr;
3499 
3500   fprintf(fp, "max system bytes = %10u\n",
3501 	  (unsigned int)(local_max_total_mem));
3502 #if HAVE_MMAP
3503   fprintf(fp, "system bytes     = %10u\n",
3504 	  (unsigned int)(local_sbrked_mem + local_mmapped_mem));
3505   fprintf(fp, "in use bytes     = %10u\n",
3506 	  (unsigned int)(local_mallinfo.uordblks + local_mmapped_mem));
3507 #else
3508   fprintf(fp, "system bytes     = %10u\n",
3509 	  (unsigned int)local_sbrked_mem);
3510   fprintf(fp, "in use bytes     = %10u\n",
3511 	  (unsigned int)local_mallinfo.uordblks);
3512 #endif
3513 #if HAVE_MMAP
3514   fprintf(fp, "max mmap regions = %10u\n",
3515 	  (unsigned int)local_max_n_mmaps);
3516 #endif
3517 }
3518 
3519 #endif /* DEFINE_MALLOC_STATS */
3520 
3521 #ifdef DEFINE_MALLINFO
3522 
3523 /*
3524   mallinfo returns a copy of updated current mallinfo.
3525 */
3526 
3527 #if __STD_C
mALLINFo(void)3528 struct mallinfo mALLINFo(void)
3529 #else
3530 struct mallinfo mALLINFo() RDECL
3531 #endif
3532 {
3533   struct mallinfo ret;
3534 
3535   MALLOC_LOCK;
3536   malloc_update_mallinfo();
3537   ret = current_mallinfo;
3538   MALLOC_UNLOCK;
3539   return ret;
3540 }
3541 
3542 #endif /* DEFINE_MALLINFO */
3543 
3544 #ifdef DEFINE_MALLOPT
3545 
3546 /*
3547   mallopt:
3548 
3549     mallopt is the general SVID/XPG interface to tunable parameters.
3550     The format is to provide a (parameter-number, parameter-value) pair.
3551     mallopt then sets the corresponding parameter to the argument
3552     value if it can (i.e., so long as the value is meaningful),
3553     and returns 1 if successful else 0.
3554 
3555     See descriptions of tunable parameters above.
3556 
3557 */
3558 
3559 #if __STD_C
mALLOPt(int param_number,int value)3560 int mALLOPt(int param_number, int value)
3561 #else
3562 int mALLOPt(param_number, value) RDECL int param_number; int value;
3563 #endif
3564 {
3565   MALLOC_LOCK;
3566   switch(param_number)
3567   {
3568     case M_TRIM_THRESHOLD:
3569       trim_threshold = value; MALLOC_UNLOCK; return 1;
3570     case M_TOP_PAD:
3571       top_pad = value; MALLOC_UNLOCK; return 1;
3572     case M_MMAP_THRESHOLD:
3573 #if HAVE_MMAP
3574       mmap_threshold = value;
3575 #endif
3576       MALLOC_UNLOCK;
3577       return 1;
3578     case M_MMAP_MAX:
3579 #if HAVE_MMAP
3580       n_mmaps_max = value; MALLOC_UNLOCK; return 1;
3581 #else
3582       MALLOC_UNLOCK; return value == 0;
3583 #endif
3584 
3585     default:
3586       MALLOC_UNLOCK;
3587       return 0;
3588   }
3589 }
3590 
3591 #endif /* DEFINE_MALLOPT */
3592 
3593 /*
3594 
3595 History:
3596 
3597     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
3598       * Fixed ordering problem with boundary-stamping
3599 
3600     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
3601       * Added pvalloc, as recommended by H.J. Liu
3602       * Added 64bit pointer support mainly from Wolfram Gloger
3603       * Added anonymously donated WIN32 sbrk emulation
3604       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
3605       * malloc_extend_top: fix mask error that caused wastage after
3606         foreign sbrks
3607       * Add linux mremap support code from HJ Liu
3608 
3609     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
3610       * Integrated most documentation with the code.
3611       * Add support for mmap, with help from
3612         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3613       * Use last_remainder in more cases.
3614       * Pack bins using idea from  colin@nyx10.cs.du.edu
3615       * Use ordered bins instead of best-fit threshhold
3616       * Eliminate block-local decls to simplify tracing and debugging.
3617       * Support another case of realloc via move into top
3618       * Fix error occuring when initial sbrk_base not word-aligned.
3619       * Rely on page size for units instead of SBRK_UNIT to
3620         avoid surprises about sbrk alignment conventions.
3621       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
3622         (raymond@es.ele.tue.nl) for the suggestion.
3623       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
3624       * More precautions for cases where other routines call sbrk,
3625         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3626       * Added macros etc., allowing use in linux libc from
3627         H.J. Lu (hjl@gnu.ai.mit.edu)
3628       * Inverted this history list
3629 
3630     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
3631       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
3632       * Removed all preallocation code since under current scheme
3633         the work required to undo bad preallocations exceeds
3634         the work saved in good cases for most test programs.
3635       * No longer use return list or unconsolidated bins since
3636         no scheme using them consistently outperforms those that don't
3637         given above changes.
3638       * Use best fit for very large chunks to prevent some worst-cases.
3639       * Added some support for debugging
3640 
3641     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
3642       * Removed footers when chunks are in use. Thanks to
3643         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
3644 
3645     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
3646       * Added malloc_trim, with help from Wolfram Gloger
3647         (wmglo@Dent.MED.Uni-Muenchen.DE).
3648 
3649     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
3650 
3651     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
3652       * realloc: try to expand in both directions
3653       * malloc: swap order of clean-bin strategy;
3654       * realloc: only conditionally expand backwards
3655       * Try not to scavenge used bins
3656       * Use bin counts as a guide to preallocation
3657       * Occasionally bin return list chunks in first scan
3658       * Add a few optimizations from colin@nyx10.cs.du.edu
3659 
3660     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
3661       * faster bin computation & slightly different binning
3662       * merged all consolidations to one part of malloc proper
3663          (eliminating old malloc_find_space & malloc_clean_bin)
3664       * Scan 2 returns chunks (not just 1)
3665       * Propagate failure in realloc if malloc returns 0
3666       * Add stuff to allow compilation on non-ANSI compilers
3667           from kpv@research.att.com
3668 
3669     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
3670       * removed potential for odd address access in prev_chunk
3671       * removed dependency on getpagesize.h
3672       * misc cosmetics and a bit more internal documentation
3673       * anticosmetics: mangled names in macros to evade debugger strangeness
3674       * tested on sparc, hp-700, dec-mips, rs6000
3675           with gcc & native cc (hp, dec only) allowing
3676           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
3677 
3678     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
3679       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
3680          structure of old version,  but most details differ.)
3681 
3682 */
3683 #endif
3684