1 #ifdef MALLOC_PROVIDED
2 int _dummy_mallocr = 1;
3 #else
4 /* ---------- To make a malloc.h, start cutting here ------------ */
5 
6 /*
7   A version of malloc/free/realloc written by Doug Lea and released to the
8   public domain.  Send questions/comments/complaints/performance data
9   to dl@cs.oswego.edu
10 
11 * VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)
12 
13    Note: There may be an updated version of this malloc obtainable at
14            ftp://g.oswego.edu/pub/misc/malloc.c
15          Check before installing!
16 
17    Note: This version differs from 2.6.4 only by correcting a
18          statement ordering error that could cause failures only
19          when calls to this malloc are interposed with calls to
20          other memory allocators.
21 
22 * Why use this malloc?
23 
24   This is not the fastest, most space-conserving, most portable, or
25   most tunable malloc ever written. However it is among the fastest
26   while also being among the most space-conserving, portable and tunable.
27   Consistent balance across these factors results in a good general-purpose
28   allocator. For a high-level description, see
29      http://g.oswego.edu/dl/html/malloc.html
30 
31 * Synopsis of public routines
32 
33   (Much fuller descriptions are contained in the program documentation below.)
34 
35   malloc(size_t n);
36      Return a pointer to a newly allocated chunk of at least n bytes, or null
37      if no space is available.
38   free(Void_t* p);
39      Release the chunk of memory pointed to by p, or no effect if p is null.
40   realloc(Void_t* p, size_t n);
41      Return a pointer to a chunk of size n that contains the same data
42      as does chunk p up to the minimum of (n, p's size) bytes, or null
43      if no space is available. The returned pointer may or may not be
44      the same as p. If p is null, equivalent to malloc.  Unless the
45      #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
46      size argument of zero (re)allocates a minimum-sized chunk.
47   memalign(size_t alignment, size_t n);
48      Return a pointer to a newly allocated chunk of n bytes, aligned
49      in accord with the alignment argument, which must be a power of
50      two.
51   valloc(size_t n);
52      Equivalent to memalign(pagesize, n), where pagesize is the page
53      size of the system (or as near to this as can be figured out from
54      all the includes/defines below.)
55   pvalloc(size_t n);
56      Equivalent to valloc(minimum-page-that-holds(n)), that is,
57      round up n to nearest pagesize.
58   calloc(size_t unit, size_t quantity);
59      Returns a pointer to quantity * unit bytes, with all locations
60      set to zero.
61   cfree(Void_t* p);
62      Equivalent to free(p).
63   malloc_trim(size_t pad);
64      Release all but pad bytes of freed top-most memory back
65      to the system. Return 1 if successful, else 0.
66   malloc_usable_size(Void_t* p);
67      Report the number usable allocated bytes associated with allocated
68      chunk p. This may or may not report more bytes than were requested,
69      due to alignment and minimum size constraints.
70   malloc_stats();
71      Prints brief summary statistics on stderr.
72   mallinfo()
73      Returns (by copy) a struct containing various summary statistics.
74   mallopt(int parameter_number, int parameter_value)
75      Changes one of the tunable parameters described below. Returns
76      1 if successful in changing the parameter, else 0.
77 
78 * Vital statistics:
79 
80   Alignment:                            8-byte
81        8 byte alignment is currently hardwired into the design.  This
82        seems to suffice for all current machines and C compilers.
83 
84   Assumed pointer representation:       4 or 8 bytes
85        Code for 8-byte pointers is untested by me but has worked
86        reliably by Wolfram Gloger, who contributed most of the
87        changes supporting this.
88 
89   Assumed size_t  representation:       4 or 8 bytes
90        Note that size_t is allowed to be 4 bytes even if pointers are 8.
91 
92   Minimum overhead per allocated chunk: 4 or 8 bytes
93        Each malloced chunk has a hidden overhead of 4 bytes holding size
94        and status information.
95 
96   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
97                           8-byte ptrs:  24/32 bytes (including, 4/8 overhead)
98 
99        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
100        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
101        needed; 4 (8) for a trailing size field
102        and 8 (16) bytes for free list pointers. Thus, the minimum
103        allocatable size is 16/24/32 bytes.
104 
105        Even a request for zero bytes (i.e., malloc(0)) returns a
106        pointer to something of the minimum allocatable size.
107 
108   Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
109                           8-byte size_t: 2^63 - 16 bytes
110 
111        It is assumed that (possibly signed) size_t bit values suffice to
112        represent chunk sizes. `Possibly signed' is due to the fact
113        that `size_t' may be defined on a system as either a signed or
114        an unsigned type. To be conservative, values that would appear
115        as negative numbers are avoided.
116        Requests for sizes with a negative sign bit will return a
117        minimum-sized chunk.
118 
119   Maximum overhead wastage per allocated chunk: normally 15 bytes
120 
121        Alignnment demands, plus the minimum allocatable size restriction
122        make the normal worst-case wastage 15 bytes (i.e., up to 15
123        more bytes will be allocated than were requested in malloc), with
124        two exceptions:
125          1. Because requests for zero bytes allocate non-zero space,
126             the worst case wastage for a request of zero bytes is 24 bytes.
127          2. For requests >= mmap_threshold that are serviced via
128             mmap(), the worst case wastage is 8 bytes plus the remainder
129             from a system page (the minimal mmap unit); typically 4096 bytes.
130 
131 * Limitations
132 
133     Here are some features that are NOT currently supported
134 
135     * No user-definable hooks for callbacks and the like.
136     * No automated mechanism for fully checking that all accesses
137       to malloced memory stay within their bounds.
138     * No support for compaction.
139 
140 * Synopsis of compile-time options:
141 
142     People have reported using previous versions of this malloc on all
143     versions of Unix, sometimes by tweaking some of the defines
144     below. It has been tested most extensively on Solaris and
145     Linux. It is also reported to work on WIN32 platforms.
146     People have also reported adapting this malloc for use in
147     stand-alone embedded systems.
148 
149     The implementation is in straight, hand-tuned ANSI C.  Among other
150     consequences, it uses a lot of macros.  Because of this, to be at
151     all usable, this code should be compiled using an optimizing compiler
152     (for example gcc -O2) that can simplify expressions and control
153     paths.
154 
155   __STD_C                  (default: derived from C compiler defines)
156      Nonzero if using ANSI-standard C compiler, a C++ compiler, or
157      a C compiler sufficiently close to ANSI to get away with it.
158   DEBUG                    (default: NOT defined)
159      Define to enable debugging. Adds fairly extensive assertion-based
160      checking to help track down memory errors, but noticeably slows down
161      execution.
162   SEPARATE_OBJECTS	   (default: NOT defined)
163      Define this to compile into separate .o files.  You must then
164      compile malloc.c several times, defining a DEFINE_* macro each
165      time.  The list of DEFINE_* macros appears below.
166   MALLOC_LOCK		   (default: NOT defined)
167   MALLOC_UNLOCK		   (default: NOT defined)
168      Define these to C expressions which are run to lock and unlock
169      the malloc data structures.  Calls may be nested; that is,
170      MALLOC_LOCK may be called more than once before the corresponding
171      MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
172      that it already holds.
173   MALLOC_ALIGNMENT          (default: NOT defined)
174      Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
175      which is the normal default.
176   REALLOC_ZERO_BYTES_FREES (default: NOT defined)
177      Define this if you think that realloc(p, 0) should be equivalent
178      to free(p). Otherwise, since malloc returns a unique pointer for
179      malloc(0), so does realloc(p, 0).
180   HAVE_MEMCPY               (default: defined)
181      Define if you are not otherwise using ANSI STD C, but still
182      have memcpy and memset in your C library and want to use them.
183      Otherwise, simple internal versions are supplied.
184   USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
185      Define as 1 if you want the C library versions of memset and
186      memcpy called in realloc and calloc (otherwise macro versions are used).
187      At least on some platforms, the simple macro versions usually
188      outperform libc versions.
189   HAVE_MMAP                 (default: defined as 1)
190      Define to non-zero to optionally make malloc() use mmap() to
191      allocate very large blocks.
192   HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
193      Define to non-zero to optionally make realloc() use mremap() to
194      reallocate very large blocks.
195   malloc_getpagesize        (default: derived from system #includes)
196      Either a constant or routine call returning the system page size.
197   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
198      Optionally define if you are on a system with a /usr/include/malloc.h
199      that declares struct mallinfo. It is not at all necessary to
200      define this even if you do, but will ensure consistency.
201   INTERNAL_SIZE_T           (default: size_t)
202      Define to a 32-bit type (probably `unsigned int') if you are on a
203      64-bit machine, yet do not want or need to allow malloc requests of
204      greater than 2^31 to be handled. This saves space, especially for
205      very small chunks.
206   INTERNAL_LINUX_C_LIB      (default: NOT defined)
207      Defined only when compiled as part of Linux libc.
208      Also note that there is some odd internal name-mangling via defines
209      (for example, internally, `malloc' is named `mALLOc') needed
210      when compiling in this case. These look funny but don't otherwise
211      affect anything.
212   INTERNAL_NEWLIB	    (default: NOT defined)
213      Defined only when compiled as part of the Cygnus newlib
214      distribution.
215   WIN32                     (default: undefined)
216      Define this on MS win (95, nt) platforms to compile in sbrk emulation.
217   LACKS_UNISTD_H            (default: undefined)
218      Define this if your system does not have a <unistd.h>.
219   MORECORE                  (default: sbrk)
220      The name of the routine to call to obtain more memory from the system.
221   MORECORE_FAILURE          (default: -1)
222      The value returned upon failure of MORECORE.
223   MORECORE_CLEARS           (default 1)
224      True (1) if the routine mapped to MORECORE zeroes out memory (which
225      holds for sbrk).
226   DEFAULT_TRIM_THRESHOLD
227   DEFAULT_TOP_PAD
228   DEFAULT_MMAP_THRESHOLD
229   DEFAULT_MMAP_MAX
230      Default values of tunable parameters (described in detail below)
231      controlling interaction with host system routines (sbrk, mmap, etc).
232      These values may also be changed dynamically via mallopt(). The
233      preset defaults are those that give best performance for typical
234      programs/systems.
235 
236 
237 */
238 
239 
240 
241 
242 /* Preliminaries */
243 
244 #ifndef __STD_C
245 #ifdef __STDC__
246 #define __STD_C     1
247 #else
248 #if __cplusplus
249 #define __STD_C     1
250 #else
251 #define __STD_C     0
252 #endif /*__cplusplus*/
253 #endif /*__STDC__*/
254 #endif /*__STD_C*/
255 
256 #ifndef Void_t
257 #if __STD_C
258 #define Void_t      void
259 #else
260 #define Void_t      char
261 #endif
262 #endif /*Void_t*/
263 
264 #if __STD_C
265 #include <stddef.h>   /* for size_t */
266 #else
267 #include <sys/types.h>
268 #endif
269 
270 #ifdef __cplusplus
271 extern "C" {
272 #endif
273 
274 #include <stdio.h>    /* needed for malloc_stats */
275 #include <limits.h>   /* needed for overflow checks */
276 #include <errno.h>    /* needed to set errno to ENOMEM */
277 
278 #ifdef WIN32
279 #define WIN32_LEAN_AND_MEAN
280 #include <windows.h>
281 #endif
282 
283 /*
284   Compile-time options
285 */
286 
287 
288 /*
289 
290   Special defines for Cygnus newlib distribution.
291 
292  */
293 
294 #ifdef INTERNAL_NEWLIB
295 
296 #include <sys/config.h>
297 
298 /*
299   In newlib, all the publically visible routines take a reentrancy
300   pointer.  We don't currently do anything much with it, but we do
301   pass it to the lock routine.
302  */
303 
304 #include <reent.h>
305 
306 #define POINTER_UINT unsigned _POINTER_INT
307 #define SEPARATE_OBJECTS
308 #define HAVE_MMAP 0
309 #define MORECORE(size) _sbrk_r(reent_ptr, (size))
310 #define MORECORE_CLEARS 0
311 #define MALLOC_LOCK __malloc_lock(reent_ptr)
312 #define MALLOC_UNLOCK __malloc_unlock(reent_ptr)
313 
314 #ifdef __CYGWIN__
315 # undef _WIN32
316 # undef WIN32
317 #endif
318 
319 #ifndef _WIN32
320 #ifdef SMALL_MEMORY
321 #define malloc_getpagesize (128)
322 #else
323 #define malloc_getpagesize (4096)
324 #endif
325 #endif
326 
327 #if __STD_C
328 extern void __malloc_lock(struct _reent *);
329 extern void __malloc_unlock(struct _reent *);
330 #else
331 extern void __malloc_lock();
332 extern void __malloc_unlock();
333 #endif
334 
335 #if __STD_C
336 #define RARG struct _reent *reent_ptr,
337 #define RONEARG struct _reent *reent_ptr
338 #else
339 #define RARG reent_ptr
340 #define RONEARG reent_ptr
341 #define RDECL struct _reent *reent_ptr;
342 #endif
343 
344 #define RERRNO reent_ptr->_errno
345 #define RCALL reent_ptr,
346 #define RONECALL reent_ptr
347 
348 #else /* ! INTERNAL_NEWLIB */
349 
350 #define POINTER_UINT unsigned long
351 #define RARG
352 #define RONEARG
353 #define RDECL
354 #define RERRNO errno
355 #define RCALL
356 #define RONECALL
357 
358 #endif /* ! INTERNAL_NEWLIB */
359 
360 /*
361     Debugging:
362 
363     Because freed chunks may be overwritten with link fields, this
364     malloc will often die when freed memory is overwritten by user
365     programs.  This can be very effective (albeit in an annoying way)
366     in helping track down dangling pointers.
367 
368     If you compile with -DDEBUG, a number of assertion checks are
369     enabled that will catch more memory errors. You probably won't be
370     able to make much sense of the actual assertion errors, but they
371     should help you locate incorrectly overwritten memory.  The
372     checking is fairly extensive, and will slow down execution
373     noticeably. Calling malloc_stats or mallinfo with DEBUG set will
374     attempt to check every non-mmapped allocated and free chunk in the
375     course of computing the summmaries. (By nature, mmapped regions
376     cannot be checked very much automatically.)
377 
378     Setting DEBUG may also be helpful if you are trying to modify
379     this code. The assertions in the check routines spell out in more
380     detail the assumptions and invariants underlying the algorithms.
381 
382 */
383 
384 #if DEBUG
385 #include <assert.h>
386 #else
387 #define assert(x) ((void)0)
388 #endif
389 
390 
391 /*
392   SEPARATE_OBJECTS should be defined if you want each function to go
393   into a separate .o file.  You must then compile malloc.c once per
394   function, defining the appropriate DEFINE_ macro.  See below for the
395   list of macros.
396  */
397 
398 #ifndef SEPARATE_OBJECTS
399 #define DEFINE_MALLOC
400 #define DEFINE_FREE
401 #define DEFINE_REALLOC
402 #define DEFINE_CALLOC
403 #define DEFINE_CFREE
404 #define DEFINE_MEMALIGN
405 #define DEFINE_VALLOC
406 #define DEFINE_PVALLOC
407 #define DEFINE_MALLINFO
408 #define DEFINE_MALLOC_STATS
409 #define DEFINE_MALLOC_USABLE_SIZE
410 #define DEFINE_MALLOPT
411 
412 #define STATIC static
413 #else
414 #define STATIC
415 #endif
416 
417 /*
418    Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
419    lock and unlock the malloc data structures.  MALLOC_LOCK may be
420    called recursively.
421  */
422 
423 #ifndef MALLOC_LOCK
424 #define MALLOC_LOCK
425 #endif
426 
427 #ifndef MALLOC_UNLOCK
428 #define MALLOC_UNLOCK
429 #endif
430 
431 /*
432   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
433   of chunk sizes. On a 64-bit machine, you can reduce malloc
434   overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
435   at the expense of not being able to handle requests greater than
436   2^31. This limitation is hardly ever a concern; you are encouraged
437   to set this. However, the default version is the same as size_t.
438 */
439 
440 #ifndef INTERNAL_SIZE_T
441 #define INTERNAL_SIZE_T size_t
442 #endif
443 
444 /*
445   Following is needed on implementations whereby long > size_t.
446   The problem is caused because the code performs subtractions of
447   size_t values and stores the result in long values.  In the case
448   where long > size_t and the first value is actually less than
449   the second value, the resultant value is positive.  For example,
450   (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
451   which is 2*31 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
452   fact that assignment from unsigned to signed won't sign extend.
453 */
454 
455 #define long_sub_size_t(x, y)				\
456   (sizeof (long) > sizeof (INTERNAL_SIZE_T) && x < y	\
457    ? -(long) (y - x)					\
458    : (long) (x - y))
459 
460 /*
461   REALLOC_ZERO_BYTES_FREES should be set if a call to
462   realloc with zero bytes should be the same as a call to free.
463   Some people think it should. Otherwise, since this malloc
464   returns a unique pointer for malloc(0), so does realloc(p, 0).
465 */
466 
467 
468 /*   #define REALLOC_ZERO_BYTES_FREES */
469 
470 
471 /*
472   WIN32 causes an emulation of sbrk to be compiled in
473   mmap-based options are not currently supported in WIN32.
474 */
475 
476 /* #define WIN32 */
477 #ifdef WIN32
478 #define MORECORE wsbrk
479 #define HAVE_MMAP 0
480 #endif
481 
482 
483 /*
484   HAVE_MEMCPY should be defined if you are not otherwise using
485   ANSI STD C, but still have memcpy and memset in your C library
486   and want to use them in calloc and realloc. Otherwise simple
487   macro versions are defined here.
488 
489   USE_MEMCPY should be defined as 1 if you actually want to
490   have memset and memcpy called. People report that the macro
491   versions are often enough faster than libc versions on many
492   systems that it is better to use them.
493 
494 */
495 
496 #define HAVE_MEMCPY
497 
498 /* Although the original macro is called USE_MEMCPY, newlib actually
499    uses memmove to handle cases whereby a platform's memcpy implementation
500    copies backwards and thus destructive overlap may occur in realloc
501    whereby we are reclaiming free memory prior to the old allocation.  */
502 #ifndef USE_MEMCPY
503 #ifdef HAVE_MEMCPY
504 #define USE_MEMCPY 1
505 #else
506 #define USE_MEMCPY 0
507 #endif
508 #endif
509 
510 #if (__STD_C || defined(HAVE_MEMCPY))
511 
512 #if __STD_C
513 void* memset(void*, int, size_t);
514 void* memcpy(void*, const void*, size_t);
515 void* memmove(void*, const void*, size_t);
516 #else
517 Void_t* memset();
518 Void_t* memcpy();
519 Void_t* memmove();
520 #endif
521 #endif
522 
523 #if USE_MEMCPY
524 
525 /* The following macros are only invoked with (2n+1)-multiples of
526    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
527    for fast inline execution when n is small. */
528 
529 #define MALLOC_ZERO(charp, nbytes)                                            \
530 do {                                                                          \
531   INTERNAL_SIZE_T mzsz = (nbytes);                                            \
532   if(mzsz <= 9*sizeof(mzsz)) {                                                \
533     INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
534     if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
535                                      *mz++ = 0;                               \
536       if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
537                                      *mz++ = 0;                               \
538         if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
539                                      *mz++ = 0; }}}                           \
540                                      *mz++ = 0;                               \
541                                      *mz++ = 0;                               \
542                                      *mz   = 0;                               \
543   } else memset((charp), 0, mzsz);                                            \
544 } while(0)
545 
546 #define MALLOC_COPY(dest,src,nbytes)                                          \
547 do {                                                                          \
548   INTERNAL_SIZE_T mcsz = (nbytes);                                            \
549   if(mcsz <= 9*sizeof(mcsz)) {                                                \
550     INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
551     INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
552     if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
553                                      *mcdst++ = *mcsrc++;                     \
554       if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
555                                      *mcdst++ = *mcsrc++;                     \
556         if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
557                                      *mcdst++ = *mcsrc++; }}}                 \
558                                      *mcdst++ = *mcsrc++;                     \
559                                      *mcdst++ = *mcsrc++;                     \
560                                      *mcdst   = *mcsrc  ;                     \
561   } else memmove(dest, src, mcsz);                                             \
562 } while(0)
563 
564 #else /* !USE_MEMCPY */
565 
566 /* Use Duff's device for good zeroing/copying performance. */
567 
568 #define MALLOC_ZERO(charp, nbytes)                                            \
569 do {                                                                          \
570   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
571   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
572   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
573   switch (mctmp) {                                                            \
574     case 0: for(;;) { *mzp++ = 0;                                             \
575     case 7:           *mzp++ = 0;                                             \
576     case 6:           *mzp++ = 0;                                             \
577     case 5:           *mzp++ = 0;                                             \
578     case 4:           *mzp++ = 0;                                             \
579     case 3:           *mzp++ = 0;                                             \
580     case 2:           *mzp++ = 0;                                             \
581     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
582   }                                                                           \
583 } while(0)
584 
585 #define MALLOC_COPY(dest,src,nbytes)                                          \
586 do {                                                                          \
587   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
588   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
589   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
590   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
591   switch (mctmp) {                                                            \
592     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
593     case 7:           *mcdst++ = *mcsrc++;                                    \
594     case 6:           *mcdst++ = *mcsrc++;                                    \
595     case 5:           *mcdst++ = *mcsrc++;                                    \
596     case 4:           *mcdst++ = *mcsrc++;                                    \
597     case 3:           *mcdst++ = *mcsrc++;                                    \
598     case 2:           *mcdst++ = *mcsrc++;                                    \
599     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
600   }                                                                           \
601 } while(0)
602 
603 #endif
604 
605 
606 /*
607   Define HAVE_MMAP to optionally make malloc() use mmap() to
608   allocate very large blocks.  These will be returned to the
609   operating system immediately after a free().
610 */
611 
612 #ifndef HAVE_MMAP
613 #define HAVE_MMAP 1
614 #endif
615 
616 /*
617   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
618   large blocks.  This is currently only possible on Linux with
619   kernel versions newer than 1.3.77.
620 */
621 
622 #ifndef HAVE_MREMAP
623 #ifdef INTERNAL_LINUX_C_LIB
624 #define HAVE_MREMAP 1
625 #else
626 #define HAVE_MREMAP 0
627 #endif
628 #endif
629 
630 #if HAVE_MMAP
631 
632 #include <unistd.h>
633 #include <fcntl.h>
634 #include <sys/mman.h>
635 
636 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
637 #define MAP_ANONYMOUS MAP_ANON
638 #endif
639 
640 #endif /* HAVE_MMAP */
641 
642 /*
643   Access to system page size. To the extent possible, this malloc
644   manages memory from the system in page-size units.
645 
646   The following mechanics for getpagesize were adapted from
647   bsd/gnu getpagesize.h
648 */
649 
650 #ifndef LACKS_UNISTD_H
651 #  include <unistd.h>
652 #endif
653 
654 #ifndef malloc_getpagesize
655 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
656 #    ifndef _SC_PAGE_SIZE
657 #      define _SC_PAGE_SIZE _SC_PAGESIZE
658 #    endif
659 #  endif
660 #  ifdef _SC_PAGE_SIZE
661 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
662 #  else
663 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
664        extern size_t getpagesize();
665 #      define malloc_getpagesize getpagesize()
666 #    else
667 #      include <sys/param.h>
668 #      ifdef EXEC_PAGESIZE
669 #        define malloc_getpagesize EXEC_PAGESIZE
670 #      else
671 #        ifdef NBPG
672 #          ifndef CLSIZE
673 #            define malloc_getpagesize NBPG
674 #          else
675 #            define malloc_getpagesize (NBPG * CLSIZE)
676 #          endif
677 #        else
678 #          ifdef NBPC
679 #            define malloc_getpagesize NBPC
680 #          else
681 #            ifdef PAGESIZE
682 #              define malloc_getpagesize PAGESIZE
683 #            else
684 #              define malloc_getpagesize (4096) /* just guess */
685 #            endif
686 #          endif
687 #        endif
688 #      endif
689 #    endif
690 #  endif
691 #endif
692 
693 
694 
695 /*
696 
697   This version of malloc supports the standard SVID/XPG mallinfo
698   routine that returns a struct containing the same kind of
699   information you can get from malloc_stats. It should work on
700   any SVID/XPG compliant system that has a /usr/include/malloc.h
701   defining struct mallinfo. (If you'd like to install such a thing
702   yourself, cut out the preliminary declarations as described above
703   and below and save them in a malloc.h file. But there's no
704   compelling reason to bother to do this.)
705 
706   The main declaration needed is the mallinfo struct that is returned
707   (by-copy) by mallinfo().  The SVID/XPG malloinfo struct contains a
708   bunch of fields, most of which are not even meaningful in this
709   version of malloc. Some of these fields are are instead filled by
710   mallinfo() with other numbers that might possibly be of interest.
711 
712   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
713   /usr/include/malloc.h file that includes a declaration of struct
714   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
715   version is declared below.  These must be precisely the same for
716   mallinfo() to work.
717 
718 */
719 
720 /* #define HAVE_USR_INCLUDE_MALLOC_H */
721 
722 #if HAVE_USR_INCLUDE_MALLOC_H
723 #include "/usr/include/malloc.h"
724 #else
725 
726 /* SVID2/XPG mallinfo structure */
727 
728 struct mallinfo {
729   int arena;    /* total space allocated from system */
730   int ordblks;  /* number of non-inuse chunks */
731   int smblks;   /* unused -- always zero */
732   int hblks;    /* number of mmapped regions */
733   int hblkhd;   /* total space in mmapped regions */
734   int usmblks;  /* unused -- always zero */
735   int fsmblks;  /* unused -- always zero */
736   int uordblks; /* total allocated space */
737   int fordblks; /* total non-inuse space */
738   int keepcost; /* top-most, releasable (via malloc_trim) space */
739 };
740 
741 /* SVID2/XPG mallopt options */
742 
743 #define M_MXFAST  1    /* UNUSED in this malloc */
744 #define M_NLBLKS  2    /* UNUSED in this malloc */
745 #define M_GRAIN   3    /* UNUSED in this malloc */
746 #define M_KEEP    4    /* UNUSED in this malloc */
747 
748 #endif
749 
750 /* mallopt options that actually do something */
751 
752 #define M_TRIM_THRESHOLD    -1
753 #define M_TOP_PAD           -2
754 #define M_MMAP_THRESHOLD    -3
755 #define M_MMAP_MAX          -4
756 
757 
758 
759 #ifndef DEFAULT_TRIM_THRESHOLD
760 #define DEFAULT_TRIM_THRESHOLD (128L * 1024L)
761 #endif
762 
763 /*
764     M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
765       to keep before releasing via malloc_trim in free().
766 
767       Automatic trimming is mainly useful in long-lived programs.
768       Because trimming via sbrk can be slow on some systems, and can
769       sometimes be wasteful (in cases where programs immediately
770       afterward allocate more large chunks) the value should be high
771       enough so that your overall system performance would improve by
772       releasing.
773 
774       The trim threshold and the mmap control parameters (see below)
775       can be traded off with one another. Trimming and mmapping are
776       two different ways of releasing unused memory back to the
777       system. Between these two, it is often possible to keep
778       system-level demands of a long-lived program down to a bare
779       minimum. For example, in one test suite of sessions measuring
780       the XF86 X server on Linux, using a trim threshold of 128K and a
781       mmap threshold of 192K led to near-minimal long term resource
782       consumption.
783 
784       If you are using this malloc in a long-lived program, it should
785       pay to experiment with these values.  As a rough guide, you
786       might set to a value close to the average size of a process
787       (program) running on your system.  Releasing this much memory
788       would allow such a process to run in memory.  Generally, it's
789       worth it to tune for trimming rather tham memory mapping when a
790       program undergoes phases where several large chunks are
791       allocated and released in ways that can reuse each other's
792       storage, perhaps mixed with phases where there are no such
793       chunks at all.  And in well-behaved long-lived programs,
794       controlling release of large blocks via trimming versus mapping
795       is usually faster.
796 
797       However, in most programs, these parameters serve mainly as
798       protection against the system-level effects of carrying around
799       massive amounts of unneeded memory. Since frequent calls to
800       sbrk, mmap, and munmap otherwise degrade performance, the default
801       parameters are set to relatively high values that serve only as
802       safeguards.
803 
804       The default trim value is high enough to cause trimming only in
805       fairly extreme (by current memory consumption standards) cases.
806       It must be greater than page size to have any useful effect.  To
807       disable trimming completely, you can set to (unsigned long)(-1);
808 
809 
810 */
811 
812 
813 #ifndef DEFAULT_TOP_PAD
814 #define DEFAULT_TOP_PAD        (0)
815 #endif
816 
817 /*
818     M_TOP_PAD is the amount of extra `padding' space to allocate or
819       retain whenever sbrk is called. It is used in two ways internally:
820 
821       * When sbrk is called to extend the top of the arena to satisfy
822         a new malloc request, this much padding is added to the sbrk
823         request.
824 
825       * When malloc_trim is called automatically from free(),
826         it is used as the `pad' argument.
827 
828       In both cases, the actual amount of padding is rounded
829       so that the end of the arena is always a system page boundary.
830 
831       The main reason for using padding is to avoid calling sbrk so
832       often. Having even a small pad greatly reduces the likelihood
833       that nearly every malloc request during program start-up (or
834       after trimming) will invoke sbrk, which needlessly wastes
835       time.
836 
837       Automatic rounding-up to page-size units is normally sufficient
838       to avoid measurable overhead, so the default is 0.  However, in
839       systems where sbrk is relatively slow, it can pay to increase
840       this value, at the expense of carrying around more memory than
841       the program needs.
842 
843 */
844 
845 
846 #ifndef DEFAULT_MMAP_THRESHOLD
847 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
848 #endif
849 
850 /*
851 
852     M_MMAP_THRESHOLD is the request size threshold for using mmap()
853       to service a request. Requests of at least this size that cannot
854       be allocated using already-existing space will be serviced via mmap.
855       (If enough normal freed space already exists it is used instead.)
856 
857       Using mmap segregates relatively large chunks of memory so that
858       they can be individually obtained and released from the host
859       system. A request serviced through mmap is never reused by any
860       other request (at least not directly; the system may just so
861       happen to remap successive requests to the same locations).
862 
863       Segregating space in this way has the benefit that mmapped space
864       can ALWAYS be individually released back to the system, which
865       helps keep the system level memory demands of a long-lived
866       program low. Mapped memory can never become `locked' between
867       other chunks, as can happen with normally allocated chunks, which
868       menas that even trimming via malloc_trim would not release them.
869 
870       However, it has the disadvantages that:
871 
872          1. The space cannot be reclaimed, consolidated, and then
873             used to service later requests, as happens with normal chunks.
874          2. It can lead to more wastage because of mmap page alignment
875             requirements
876          3. It causes malloc performance to be more dependent on host
877             system memory management support routines which may vary in
878             implementation quality and may impose arbitrary
879             limitations. Generally, servicing a request via normal
880             malloc steps is faster than going through a system's mmap.
881 
882       All together, these considerations should lead you to use mmap
883       only for relatively large requests.
884 
885 
886 */
887 
888 
889 
890 #ifndef DEFAULT_MMAP_MAX
891 #if HAVE_MMAP
892 #define DEFAULT_MMAP_MAX       (64)
893 #else
894 #define DEFAULT_MMAP_MAX       (0)
895 #endif
896 #endif
897 
898 /*
899     M_MMAP_MAX is the maximum number of requests to simultaneously
900       service using mmap. This parameter exists because:
901 
902          1. Some systems have a limited number of internal tables for
903             use by mmap.
904          2. In most systems, overreliance on mmap can degrade overall
905             performance.
906          3. If a program allocates many large regions, it is probably
907             better off using normal sbrk-based allocation routines that
908             can reclaim and reallocate normal heap memory. Using a
909             small value allows transition into this mode after the
910             first few allocations.
911 
912       Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
913       the default value is 0, and attempts to set it to non-zero values
914       in mallopt will fail.
915 */
916 
917 
918 
919 
920 /*
921 
922   Special defines for linux libc
923 
924   Except when compiled using these special defines for Linux libc
925   using weak aliases, this malloc is NOT designed to work in
926   multithreaded applications.  No semaphores or other concurrency
927   control are provided to ensure that multiple malloc or free calls
928   don't run at the same time, which could be disasterous. A single
929   semaphore could be used across malloc, realloc, and free (which is
930   essentially the effect of the linux weak alias approach). It would
931   be hard to obtain finer granularity.
932 
933 */
934 
935 
936 #ifdef INTERNAL_LINUX_C_LIB
937 
938 #if __STD_C
939 
940 Void_t * __default_morecore_init (ptrdiff_t);
941 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;
942 
943 #else
944 
945 Void_t * __default_morecore_init ();
946 Void_t *(*__morecore)() = __default_morecore_init;
947 
948 #endif
949 
950 #define MORECORE (*__morecore)
951 #define MORECORE_FAILURE 0
952 #define MORECORE_CLEARS 1
953 
954 #else /* INTERNAL_LINUX_C_LIB */
955 
956 #ifndef INTERNAL_NEWLIB
957 #if __STD_C
958 extern Void_t*     sbrk(ptrdiff_t);
959 #else
960 extern Void_t*     sbrk();
961 #endif
962 #endif
963 
964 #ifndef MORECORE
965 #define MORECORE sbrk
966 #endif
967 
968 #ifndef MORECORE_FAILURE
969 #define MORECORE_FAILURE -1
970 #endif
971 
972 #ifndef MORECORE_CLEARS
973 #define MORECORE_CLEARS 1
974 #endif
975 
976 #endif /* INTERNAL_LINUX_C_LIB */
977 
978 #if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)
979 
980 #define cALLOc		__libc_calloc
981 #define fREe		__libc_free
982 #define mALLOc		__libc_malloc
983 #define mEMALIGn	__libc_memalign
984 #define rEALLOc		__libc_realloc
985 #define vALLOc		__libc_valloc
986 #define pvALLOc		__libc_pvalloc
987 #define mALLINFo	__libc_mallinfo
988 #define mALLOPt		__libc_mallopt
989 
990 #pragma weak calloc = __libc_calloc
991 #pragma weak free = __libc_free
992 #pragma weak cfree = __libc_free
993 #pragma weak malloc = __libc_malloc
994 #pragma weak memalign = __libc_memalign
995 #pragma weak realloc = __libc_realloc
996 #pragma weak valloc = __libc_valloc
997 #pragma weak pvalloc = __libc_pvalloc
998 #pragma weak mallinfo = __libc_mallinfo
999 #pragma weak mallopt = __libc_mallopt
1000 
1001 #else
1002 
1003 #ifdef INTERNAL_NEWLIB
1004 
1005 #define cALLOc		_calloc_r
1006 #define fREe		_free_r
1007 #define mALLOc		_malloc_r
1008 #define mEMALIGn	_memalign_r
1009 #define rEALLOc		_realloc_r
1010 #define vALLOc		_valloc_r
1011 #define pvALLOc		_pvalloc_r
1012 #define mALLINFo	_mallinfo_r
1013 #define mALLOPt		_mallopt_r
1014 
1015 #define malloc_stats			_malloc_stats_r
1016 #define malloc_trim			_malloc_trim_r
1017 #define malloc_usable_size		_malloc_usable_size_r
1018 
1019 #define malloc_update_mallinfo		__malloc_update_mallinfo
1020 
1021 #define malloc_av_			__malloc_av_
1022 #define malloc_current_mallinfo		__malloc_current_mallinfo
1023 #define malloc_max_sbrked_mem		__malloc_max_sbrked_mem
1024 #define malloc_max_total_mem		__malloc_max_total_mem
1025 #define malloc_sbrk_base		__malloc_sbrk_base
1026 #define malloc_top_pad			__malloc_top_pad
1027 #define malloc_trim_threshold		__malloc_trim_threshold
1028 
1029 #else /* ! INTERNAL_NEWLIB */
1030 
1031 #define cALLOc		calloc
1032 #define fREe		free
1033 #define mALLOc		malloc
1034 #define mEMALIGn	memalign
1035 #define rEALLOc		realloc
1036 #define vALLOc		valloc
1037 #define pvALLOc		pvalloc
1038 #define mALLINFo	mallinfo
1039 #define mALLOPt		mallopt
1040 
1041 #endif /* ! INTERNAL_NEWLIB */
1042 #endif
1043 
1044 /* Public routines */
1045 
1046 #if __STD_C
1047 
1048 Void_t* mALLOc(RARG size_t);
1049 void    fREe(RARG Void_t*);
1050 Void_t* rEALLOc(RARG Void_t*, size_t);
1051 Void_t* mEMALIGn(RARG size_t, size_t);
1052 Void_t* vALLOc(RARG size_t);
1053 Void_t* pvALLOc(RARG size_t);
1054 Void_t* cALLOc(RARG size_t, size_t);
1055 void    cfree(Void_t*);
1056 int     malloc_trim(RARG size_t);
1057 size_t  malloc_usable_size(RARG Void_t*);
1058 void    malloc_stats(RONEARG);
1059 int     mALLOPt(RARG int, int);
1060 struct mallinfo mALLINFo(RONEARG);
1061 #else
1062 Void_t* mALLOc();
1063 void    fREe();
1064 Void_t* rEALLOc();
1065 Void_t* mEMALIGn();
1066 Void_t* vALLOc();
1067 Void_t* pvALLOc();
1068 Void_t* cALLOc();
1069 void    cfree();
1070 int     malloc_trim();
1071 size_t  malloc_usable_size();
1072 void    malloc_stats();
1073 int     mALLOPt();
1074 struct mallinfo mALLINFo();
1075 #endif
1076 
1077 
1078 #ifdef __cplusplus
1079 };  /* end of extern "C" */
1080 #endif
1081 
1082 /* ---------- To make a malloc.h, end cutting here ------------ */
1083 
1084 
1085 /*
1086   Emulation of sbrk for WIN32
1087   All code within the ifdef WIN32 is untested by me.
1088 */
1089 
1090 
1091 #ifdef WIN32
1092 
1093 #define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
1094 ~(malloc_getpagesize-1))
1095 
1096 /* resrve 64MB to insure large contiguous space */
1097 #define RESERVED_SIZE (1024*1024*64)
1098 #define NEXT_SIZE (2048*1024)
1099 #define TOP_MEMORY ((unsigned long)2*1024*1024*1024)
1100 
1101 struct GmListElement;
1102 typedef struct GmListElement GmListElement;
1103 
1104 struct GmListElement
1105 {
1106 	GmListElement* next;
1107 	void* base;
1108 };
1109 
1110 static GmListElement* head = 0;
1111 static unsigned int gNextAddress = 0;
1112 static unsigned int gAddressBase = 0;
1113 static unsigned int gAllocatedSize = 0;
1114 
1115 static
makeGmListElement(void * bas)1116 GmListElement* makeGmListElement (void* bas)
1117 {
1118 	GmListElement* this;
1119 	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
1120 	ASSERT (this);
1121 	if (this)
1122 	{
1123 		this->base = bas;
1124 		this->next = head;
1125 		head = this;
1126 	}
1127 	return this;
1128 }
1129 
gcleanup()1130 void gcleanup ()
1131 {
1132 	BOOL rval;
1133 	ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase));
1134 	if (gAddressBase && (gNextAddress - gAddressBase))
1135 	{
1136 		rval = VirtualFree ((void*)gAddressBase,
1137 							gNextAddress - gAddressBase,
1138 							MEM_DECOMMIT);
1139         ASSERT (rval);
1140 	}
1141 	while (head)
1142 	{
1143 		GmListElement* next = head->next;
1144 		rval = VirtualFree (head->base, 0, MEM_RELEASE);
1145 		ASSERT (rval);
1146 		LocalFree (head);
1147 		head = next;
1148 	}
1149 }
1150 
1151 static
findRegion(void * start_address,unsigned long size)1152 void* findRegion (void* start_address, unsigned long size)
1153 {
1154 	MEMORY_BASIC_INFORMATION info;
1155 	while ((unsigned long)start_address < TOP_MEMORY)
1156 	{
1157 		VirtualQuery (start_address, &info, sizeof (info));
1158 		if (info.State != MEM_FREE)
1159 			start_address = (char*)info.BaseAddress + info.RegionSize;
1160 		else if (info.RegionSize >= size)
1161 			return start_address;
1162 		else
1163 			start_address = (char*)info.BaseAddress + info.RegionSize;
1164 	}
1165 	return NULL;
1166 
1167 }
1168 
1169 
wsbrk(long size)1170 void* wsbrk (long size)
1171 {
1172 	void* tmp;
1173 	if (size > 0)
1174 	{
1175 		if (gAddressBase == 0)
1176 		{
1177 			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
1178 			gNextAddress = gAddressBase =
1179 				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
1180 											MEM_RESERVE, PAGE_NOACCESS);
1181 		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
1182 gAllocatedSize))
1183 		{
1184 			long new_size = max (NEXT_SIZE, AlignPage (size));
1185 			void* new_address = (void*)(gAddressBase+gAllocatedSize);
1186 			do
1187 			{
1188 				new_address = findRegion (new_address, new_size);
1189 
1190 				if (new_address == 0)
1191 					return (void*)-1;
1192 
1193 				gAddressBase = gNextAddress =
1194 					(unsigned int)VirtualAlloc (new_address, new_size,
1195 												MEM_RESERVE, PAGE_NOACCESS);
1196 				// repeat in case of race condition
1197 				// The region that we found has been snagged
1198 				// by another thread
1199 			}
1200 			while (gAddressBase == 0);
1201 
1202 			ASSERT (new_address == (void*)gAddressBase);
1203 
1204 			gAllocatedSize = new_size;
1205 
1206 			if (!makeGmListElement ((void*)gAddressBase))
1207 				return (void*)-1;
1208 		}
1209 		if ((size + gNextAddress) > AlignPage (gNextAddress))
1210 		{
1211 			void* res;
1212 			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
1213 								(size + gNextAddress -
1214 								 AlignPage (gNextAddress)),
1215 								MEM_COMMIT, PAGE_READWRITE);
1216 			if (res == 0)
1217 				return (void*)-1;
1218 		}
1219 		tmp = (void*)gNextAddress;
1220 		gNextAddress = (unsigned int)tmp + size;
1221 		return tmp;
1222 	}
1223 	else if (size < 0)
1224 	{
1225 		unsigned int alignedGoal = AlignPage (gNextAddress + size);
1226 		/* Trim by releasing the virtual memory */
1227 		if (alignedGoal >= gAddressBase)
1228 		{
1229 			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
1230 						 MEM_DECOMMIT);
1231 			gNextAddress = gNextAddress + size;
1232 			return (void*)gNextAddress;
1233 		}
1234 		else
1235 		{
1236 			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
1237 						 MEM_DECOMMIT);
1238 			gNextAddress = gAddressBase;
1239 			return (void*)-1;
1240 		}
1241 	}
1242 	else
1243 	{
1244 		return (void*)gNextAddress;
1245 	}
1246 }
1247 
1248 #endif
1249 
1250 
1251 
1252 /*
1253   Type declarations
1254 */
1255 
1256 
1257 struct malloc_chunk
1258 {
1259   INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1260   INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
1261   struct malloc_chunk* fd;   /* double links -- used only if free. */
1262   struct malloc_chunk* bk;
1263 };
1264 
1265 typedef struct malloc_chunk* mchunkptr;
1266 
1267 /*
1268 
1269    malloc_chunk details:
1270 
1271     (The following includes lightly edited explanations by Colin Plumb.)
1272 
1273     Chunks of memory are maintained using a `boundary tag' method as
1274     described in e.g., Knuth or Standish.  (See the paper by Paul
1275     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1276     survey of such techniques.)  Sizes of free chunks are stored both
1277     in the front of each chunk and at the end.  This makes
1278     consolidating fragmented chunks into bigger chunks very fast.  The
1279     size fields also hold bits representing whether chunks are free or
1280     in use.
1281 
1282     An allocated chunk looks like this:
1283 
1284 
1285     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1286             |             Size of previous chunk, if allocated            | |
1287             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1288             |             Size of chunk, in bytes                         |P|
1289       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1290             |             User data starts here...                          .
1291             .                                                               .
1292             .             (malloc_usable_space() bytes)                     .
1293             .                                                               |
1294 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1295             |             Size of chunk                                     |
1296             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1297 
1298 
1299     Where "chunk" is the front of the chunk for the purpose of most of
1300     the malloc code, but "mem" is the pointer that is returned to the
1301     user.  "Nextchunk" is the beginning of the next contiguous chunk.
1302 
1303     Chunks always begin on even word boundries, so the mem portion
1304     (which is returned to the user) is also on an even word boundary, and
1305     thus double-word aligned.
1306 
1307     Free chunks are stored in circular doubly-linked lists, and look like this:
1308 
1309     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1310             |             Size of previous chunk                            |
1311             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1312     `head:' |             Size of chunk, in bytes                         |P|
1313       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1314             |             Forward pointer to next chunk in list             |
1315             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1316             |             Back pointer to previous chunk in list            |
1317             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1318             |             Unused space (may be 0 bytes long)                .
1319             .                                                               .
1320             .                                                               |
1321 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1322     `foot:' |             Size of chunk, in bytes                           |
1323             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1324 
1325     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1326     chunk size (which is always a multiple of two words), is an in-use
1327     bit for the *previous* chunk.  If that bit is *clear*, then the
1328     word before the current chunk size contains the previous chunk
1329     size, and can be used to find the front of the previous chunk.
1330     (The very first chunk allocated always has this bit set,
1331     preventing access to non-existent (or non-owned) memory.)
1332 
1333     Note that the `foot' of the current chunk is actually represented
1334     as the prev_size of the NEXT chunk. (This makes it easier to
1335     deal with alignments etc).
1336 
1337     The two exceptions to all this are
1338 
1339      1. The special chunk `top', which doesn't bother using the
1340         trailing size field since there is no
1341         next contiguous chunk that would have to index off it. (After
1342         initialization, `top' is forced to always exist.  If it would
1343         become less than MINSIZE bytes long, it is replenished via
1344         malloc_extend_top.)
1345 
1346      2. Chunks allocated via mmap, which have the second-lowest-order
1347         bit (IS_MMAPPED) set in their size fields.  Because they are
1348         never merged or traversed from any other chunk, they have no
1349         foot size or inuse information.
1350 
1351     Available chunks are kept in any of several places (all declared below):
1352 
1353     * `av': An array of chunks serving as bin headers for consolidated
1354        chunks. Each bin is doubly linked.  The bins are approximately
1355        proportionally (log) spaced.  There are a lot of these bins
1356        (128). This may look excessive, but works very well in
1357        practice.  All procedures maintain the invariant that no
1358        consolidated chunk physically borders another one. Chunks in
1359        bins are kept in size order, with ties going to the
1360        approximately least recently used chunk.
1361 
1362        The chunks in each bin are maintained in decreasing sorted order by
1363        size.  This is irrelevant for the small bins, which all contain
1364        the same-sized chunks, but facilitates best-fit allocation for
1365        larger chunks. (These lists are just sequential. Keeping them in
1366        order almost never requires enough traversal to warrant using
1367        fancier ordered data structures.)  Chunks of the same size are
1368        linked with the most recently freed at the front, and allocations
1369        are taken from the back.  This results in LRU or FIFO allocation
1370        order, which tends to give each chunk an equal opportunity to be
1371        consolidated with adjacent freed chunks, resulting in larger free
1372        chunks and less fragmentation.
1373 
1374     * `top': The top-most available chunk (i.e., the one bordering the
1375        end of available memory) is treated specially. It is never
1376        included in any bin, is used only if no other chunk is
1377        available, and is released back to the system if it is very
1378        large (see M_TRIM_THRESHOLD).
1379 
1380     * `last_remainder': A bin holding only the remainder of the
1381        most recently split (non-top) chunk. This bin is checked
1382        before other non-fitting chunks, so as to provide better
1383        locality for runs of sequentially allocated chunks.
1384 
1385     *  Implicitly, through the host system's memory mapping tables.
1386        If supported, requests greater than a threshold are usually
1387        serviced via calls to mmap, and then later released via munmap.
1388 
1389 */
1390 
1391 
1392 
1393 
1394 
1395 
1396 /*  sizes, alignments */
1397 
1398 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
1399 #ifndef MALLOC_ALIGNMENT
1400 #define MALLOC_ALIGN           8
1401 #define MALLOC_ALIGNMENT       (SIZE_SZ < 4 ? 8 : (SIZE_SZ + SIZE_SZ))
1402 #else
1403 #define MALLOC_ALIGN           MALLOC_ALIGNMENT
1404 #endif
1405 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
1406 #define MINSIZE                (sizeof(struct malloc_chunk))
1407 
1408 /* conversion from malloc headers to user pointers, and back */
1409 
1410 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1411 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1412 
1413 /* pad request bytes into a usable size */
1414 
1415 #define request2size(req) \
1416  (((unsigned long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
1417   (unsigned long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
1418    (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
1419 
1420 /* Check if m has acceptable alignment */
1421 
1422 #define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1423 
1424 
1425 
1426 
1427 /*
1428   Physical chunk operations
1429 */
1430 
1431 
1432 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1433 
1434 #define PREV_INUSE 0x1
1435 
1436 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1437 
1438 #define IS_MMAPPED 0x2
1439 
1440 /* Bits to mask off when extracting size */
1441 
1442 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1443 
1444 
1445 /* Ptr to next physical malloc_chunk. */
1446 
1447 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1448 
1449 /* Ptr to previous physical malloc_chunk */
1450 
1451 #define prev_chunk(p)\
1452    ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1453 
1454 
1455 /* Treat space at ptr + offset as a chunk */
1456 
1457 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
1458 
1459 
1460 
1461 
1462 /*
1463   Dealing with use bits
1464 */
1465 
1466 /* extract p's inuse bit */
1467 
1468 #define inuse(p)\
1469 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1470 
1471 /* extract inuse bit of previous chunk */
1472 
1473 #define prev_inuse(p)  ((p)->size & PREV_INUSE)
1474 
1475 /* check for mmap()'ed chunk */
1476 
1477 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1478 
1479 /* set/clear chunk as in use without otherwise disturbing */
1480 
1481 #define set_inuse(p)\
1482 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1483 
1484 #define clear_inuse(p)\
1485 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1486 
1487 /* check/set/clear inuse bits in known places */
1488 
1489 #define inuse_bit_at_offset(p, s)\
1490  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1491 
1492 #define set_inuse_bit_at_offset(p, s)\
1493  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1494 
1495 #define clear_inuse_bit_at_offset(p, s)\
1496  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1497 
1498 
1499 
1500 
1501 /*
1502   Dealing with size fields
1503 */
1504 
1505 /* Get size, ignoring use bits */
1506 
1507 #define chunksize(p)          ((p)->size & ~(SIZE_BITS))
1508 
1509 /* Set size at head, without disturbing its use bit */
1510 
1511 #define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1512 
1513 /* Set size/use ignoring previous bits in header */
1514 
1515 #define set_head(p, s)        ((p)->size = (s))
1516 
1517 /* Set size at footer (only when chunk is not in use) */
1518 
1519 #define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1520 
1521 
1522 
1523 
1524 
1525 /*
1526    Bins
1527 
1528     The bins, `av_' are an array of pairs of pointers serving as the
1529     heads of (initially empty) doubly-linked lists of chunks, laid out
1530     in a way so that each pair can be treated as if it were in a
1531     malloc_chunk. (This way, the fd/bk offsets for linking bin heads
1532     and chunks are the same).
1533 
1534     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1535     8 bytes apart. Larger bins are approximately logarithmically
1536     spaced. (See the table below.) The `av_' array is never mentioned
1537     directly in the code, but instead via bin access macros.
1538 
1539     Bin layout:
1540 
1541     64 bins of size       8
1542     32 bins of size      64
1543     16 bins of size     512
1544      8 bins of size    4096
1545      4 bins of size   32768
1546      2 bins of size  262144
1547      1 bin  of size what's left
1548 
1549     There is actually a little bit of slop in the numbers in bin_index
1550     for the sake of speed. This makes no difference elsewhere.
1551 
1552     The special chunks `top' and `last_remainder' get their own bins,
1553     (this is implemented via yet more trickery with the av_ array),
1554     although `top' is never properly linked to its bin since it is
1555     always handled specially.
1556 
1557 */
1558 
1559 #ifdef SEPARATE_OBJECTS
1560 #define av_ malloc_av_
1561 #endif
1562 
1563 #define NAV             128   /* number of bins */
1564 
1565 typedef struct malloc_chunk* mbinptr;
1566 
1567 /* access macros */
1568 
1569 #define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
1570 #define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
1571 #define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1572 
1573 /*
1574    The first 2 bins are never indexed. The corresponding av_ cells are instead
1575    used for bookkeeping. This is not to save space, but to simplify
1576    indexing, maintain locality, and avoid some initialization tests.
1577 */
1578 
1579 #define top            (bin_at(0)->fd)   /* The topmost chunk */
1580 #define last_remainder (bin_at(1))       /* remainder from last split */
1581 
1582 
1583 /*
1584    Because top initially points to its own bin with initial
1585    zero size, thus forcing extension on the first malloc request,
1586    we avoid having any special code in malloc to check whether
1587    it even exists yet. But we still need to in malloc_extend_top.
1588 */
1589 
1590 #define initial_top    ((mchunkptr)(bin_at(0)))
1591 
1592 /* Helper macro to initialize bins */
1593 
1594 #define IAV(i)  bin_at(i), bin_at(i)
1595 
1596 #ifdef DEFINE_MALLOC
1597 STATIC mbinptr av_[NAV * 2 + 2] = {
1598  0, 0,
1599  IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
1600  IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
1601  IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
1602  IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
1603  IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
1604  IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
1605  IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
1606  IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
1607  IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
1608  IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
1609  IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
1610  IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
1611  IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
1612  IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
1613  IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
1614  IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
1615 };
1616 #else
1617 extern mbinptr av_[NAV * 2 + 2];
1618 #endif
1619 
1620 
1621 
1622 /* field-extraction macros */
1623 
1624 #define first(b) ((b)->fd)
1625 #define last(b)  ((b)->bk)
1626 
1627 /*
1628   Indexing into bins
1629 */
1630 
1631 #define bin_index(sz)                                                          \
1632 (((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
1633  ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
1634  ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
1635  ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
1636  ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
1637  ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
1638                                           126)
1639 /*
1640   bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
1641   identically sized chunks. This is exploited in malloc.
1642 */
1643 
1644 #define MAX_SMALLBIN_SIZE   512
1645 #define SMALLBIN_WIDTH        8
1646 #define SMALLBIN_WIDTH_BITS   3
1647 #define MAX_SMALLBIN        (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
1648 
1649 #define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)
1650 
1651 /*
1652    Requests are `small' if both the corresponding and the next bin are small
1653 */
1654 
1655 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
1656 
1657 
1658 
1659 /*
1660     To help compensate for the large number of bins, a one-level index
1661     structure is used for bin-by-bin searching.  `binblocks' is a
1662     one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1663     have any (possibly) non-empty bins, so they can be skipped over
1664     all at once during during traversals. The bits are NOT always
1665     cleared as soon as all bins in a block are empty, but instead only
1666     when all are noticed to be empty during traversal in malloc.
1667 */
1668 
1669 #define BINBLOCKWIDTH     4   /* bins per block */
1670 
1671 #define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
1672 
1673 /* bin<->block macros */
1674 
1675 #define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
1676 #define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
1677 #define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
1678 
1679 
1680 
1681 
1682 
1683 /*  Other static bookkeeping data */
1684 
1685 #ifdef SEPARATE_OBJECTS
1686 #define trim_threshold		malloc_trim_threshold
1687 #define top_pad			malloc_top_pad
1688 #define n_mmaps_max		malloc_n_mmaps_max
1689 #define mmap_threshold		malloc_mmap_threshold
1690 #define sbrk_base		malloc_sbrk_base
1691 #define max_sbrked_mem		malloc_max_sbrked_mem
1692 #define max_total_mem		malloc_max_total_mem
1693 #define current_mallinfo	malloc_current_mallinfo
1694 #define n_mmaps			malloc_n_mmaps
1695 #define max_n_mmaps		malloc_max_n_mmaps
1696 #define mmapped_mem		malloc_mmapped_mem
1697 #define max_mmapped_mem		malloc_max_mmapped_mem
1698 #endif
1699 
1700 /* variables holding tunable values */
1701 
1702 #ifdef DEFINE_MALLOC
1703 
1704 STATIC unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
1705 STATIC unsigned long top_pad          = DEFAULT_TOP_PAD;
1706 #if HAVE_MMAP
1707 STATIC unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
1708 STATIC unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
1709 #endif
1710 
1711 /* The first value returned from sbrk */
1712 STATIC char* sbrk_base = (char*)(-1);
1713 
1714 /* The maximum memory obtained from system via sbrk */
1715 STATIC unsigned long max_sbrked_mem = 0;
1716 
1717 /* The maximum via either sbrk or mmap */
1718 STATIC unsigned long max_total_mem = 0;
1719 
1720 /* internal working copy of mallinfo */
1721 STATIC struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1722 
1723 #if HAVE_MMAP
1724 
1725 /* Tracking mmaps */
1726 
1727 STATIC unsigned int n_mmaps = 0;
1728 STATIC unsigned int max_n_mmaps = 0;
1729 STATIC unsigned long mmapped_mem = 0;
1730 STATIC unsigned long max_mmapped_mem = 0;
1731 
1732 #endif
1733 
1734 #else /* ! DEFINE_MALLOC */
1735 
1736 extern unsigned long trim_threshold;
1737 extern unsigned long top_pad;
1738 #if HAVE_MMAP
1739 extern unsigned int  n_mmaps_max;
1740 extern unsigned long mmap_threshold;
1741 #endif
1742 extern char* sbrk_base;
1743 extern unsigned long max_sbrked_mem;
1744 extern unsigned long max_total_mem;
1745 extern struct mallinfo current_mallinfo;
1746 #if HAVE_MMAP
1747 extern unsigned int n_mmaps;
1748 extern unsigned int max_n_mmaps;
1749 extern unsigned long mmapped_mem;
1750 extern unsigned long max_mmapped_mem;
1751 #endif
1752 
1753 #endif /* ! DEFINE_MALLOC */
1754 
1755 /* The total memory obtained from system via sbrk */
1756 #define sbrked_mem  (current_mallinfo.arena)
1757 
1758 
1759 
1760 /*
1761   Debugging support
1762 */
1763 
1764 #if DEBUG
1765 
1766 
1767 /*
1768   These routines make a number of assertions about the states
1769   of data structures that should be true at all times. If any
1770   are not true, it's very likely that a user program has somehow
1771   trashed memory. (It's also possible that there is a coding error
1772   in malloc. In which case, please report it!)
1773 */
1774 
1775 #if __STD_C
do_check_chunk(mchunkptr p)1776 static void do_check_chunk(mchunkptr p)
1777 #else
1778 static void do_check_chunk(p) mchunkptr p;
1779 #endif
1780 {
1781   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1782 
1783   /* No checkable chunk is mmapped */
1784   assert(!chunk_is_mmapped(p));
1785 
1786   /* Check for legal address ... */
1787   assert((char*)p >= sbrk_base);
1788   if (p != top)
1789     assert((char*)p + sz <= (char*)top);
1790   else
1791     assert((char*)p + sz <= sbrk_base + sbrked_mem);
1792 
1793 }
1794 
1795 
1796 #if __STD_C
do_check_free_chunk(mchunkptr p)1797 static void do_check_free_chunk(mchunkptr p)
1798 #else
1799 static void do_check_free_chunk(p) mchunkptr p;
1800 #endif
1801 {
1802   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1803   mchunkptr next = chunk_at_offset(p, sz);
1804 
1805   do_check_chunk(p);
1806 
1807   /* Check whether it claims to be free ... */
1808   assert(!inuse(p));
1809 
1810   /* Unless a special marker, must have OK fields */
1811   if ((long)sz >= (long)MINSIZE)
1812   {
1813     assert((sz & MALLOC_ALIGN_MASK) == 0);
1814     assert(aligned_OK(chunk2mem(p)));
1815     /* ... matching footer field */
1816     assert(next->prev_size == sz);
1817     /* ... and is fully consolidated */
1818     assert(prev_inuse(p));
1819     assert (next == top || inuse(next));
1820 
1821     /* ... and has minimally sane links */
1822     assert(p->fd->bk == p);
1823     assert(p->bk->fd == p);
1824   }
1825   else /* markers are always of size SIZE_SZ */
1826     assert(sz == SIZE_SZ);
1827 }
1828 
1829 #if __STD_C
do_check_inuse_chunk(mchunkptr p)1830 static void do_check_inuse_chunk(mchunkptr p)
1831 #else
1832 static void do_check_inuse_chunk(p) mchunkptr p;
1833 #endif
1834 {
1835   mchunkptr next = next_chunk(p);
1836   do_check_chunk(p);
1837 
1838   /* Check whether it claims to be in use ... */
1839   assert(inuse(p));
1840 
1841   /* ... and is surrounded by OK chunks.
1842     Since more things can be checked with free chunks than inuse ones,
1843     if an inuse chunk borders them and debug is on, it's worth doing them.
1844   */
1845   if (!prev_inuse(p))
1846   {
1847     mchunkptr prv = prev_chunk(p);
1848     assert(next_chunk(prv) == p);
1849     do_check_free_chunk(prv);
1850   }
1851   if (next == top)
1852   {
1853     assert(prev_inuse(next));
1854     assert(chunksize(next) >= MINSIZE);
1855   }
1856   else if (!inuse(next))
1857     do_check_free_chunk(next);
1858 
1859 }
1860 
1861 #if __STD_C
do_check_malloced_chunk(mchunkptr p,INTERNAL_SIZE_T s)1862 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
1863 #else
1864 static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
1865 #endif
1866 {
1867   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1868   long room = long_sub_size_t(sz, s);
1869 
1870   do_check_inuse_chunk(p);
1871 
1872   /* Legal size ... */
1873   assert((long)sz >= (long)MINSIZE);
1874   assert((sz & MALLOC_ALIGN_MASK) == 0);
1875   assert(room >= 0);
1876   assert(room < (long)MINSIZE);
1877 
1878   /* ... and alignment */
1879   assert(aligned_OK(chunk2mem(p)));
1880 
1881 
1882   /* ... and was allocated at front of an available chunk */
1883   assert(prev_inuse(p));
1884 
1885 }
1886 
1887 
1888 #define check_free_chunk(P)  do_check_free_chunk(P)
1889 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
1890 #define check_chunk(P) do_check_chunk(P)
1891 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
1892 #else
1893 #define check_free_chunk(P)
1894 #define check_inuse_chunk(P)
1895 #define check_chunk(P)
1896 #define check_malloced_chunk(P,N)
1897 #endif
1898 
1899 
1900 
1901 /*
1902   Macro-based internal utilities
1903 */
1904 
1905 
1906 /*
1907   Linking chunks in bin lists.
1908   Call these only with variables, not arbitrary expressions, as arguments.
1909 */
1910 
1911 /*
1912   Place chunk p of size s in its bin, in size order,
1913   putting it ahead of others of same size.
1914 */
1915 
1916 
1917 #define frontlink(P, S, IDX, BK, FD)                                          \
1918 {                                                                             \
1919   if (S < MAX_SMALLBIN_SIZE)                                                  \
1920   {                                                                           \
1921     IDX = smallbin_index(S);                                                  \
1922     mark_binblock(IDX);                                                       \
1923     BK = bin_at(IDX);                                                         \
1924     FD = BK->fd;                                                              \
1925     P->bk = BK;                                                               \
1926     P->fd = FD;                                                               \
1927     FD->bk = BK->fd = P;                                                      \
1928   }                                                                           \
1929   else                                                                        \
1930   {                                                                           \
1931     IDX = bin_index(S);                                                       \
1932     BK = bin_at(IDX);                                                         \
1933     FD = BK->fd;                                                              \
1934     if (FD == BK) mark_binblock(IDX);                                         \
1935     else                                                                      \
1936     {                                                                         \
1937       while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
1938       BK = FD->bk;                                                            \
1939     }                                                                         \
1940     P->bk = BK;                                                               \
1941     P->fd = FD;                                                               \
1942     FD->bk = BK->fd = P;                                                      \
1943   }                                                                           \
1944 }
1945 
1946 
1947 /* take a chunk off a list */
1948 
1949 #define unlink(P, BK, FD)                                                     \
1950 {                                                                             \
1951   BK = P->bk;                                                                 \
1952   FD = P->fd;                                                                 \
1953   FD->bk = BK;                                                        \
1954   BK->fd = FD;                                                        \
1955 }                                                                             \
1956 
1957 /* Place p as the last remainder */
1958 
1959 #define link_last_remainder(P)                                                \
1960 {                                                                             \
1961   last_remainder->fd = last_remainder->bk =  P;                               \
1962   P->fd = P->bk = last_remainder;                                             \
1963 }
1964 
1965 /* Clear the last_remainder bin */
1966 
1967 #define clear_last_remainder \
1968   (last_remainder->fd = last_remainder->bk = last_remainder)
1969 
1970 
1971 
1972 
1973 
1974 
1975 /* Routines dealing with mmap(). */
1976 
1977 #if HAVE_MMAP
1978 
1979 #ifdef DEFINE_MALLOC
1980 
1981 #if __STD_C
mmap_chunk(size_t size)1982 static mchunkptr mmap_chunk(size_t size)
1983 #else
1984 static mchunkptr mmap_chunk(size) size_t size;
1985 #endif
1986 {
1987   size_t page_mask = malloc_getpagesize - 1;
1988   mchunkptr p;
1989 
1990 #ifndef MAP_ANONYMOUS
1991   static int fd = -1;
1992 #endif
1993 
1994   if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
1995 
1996   /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
1997    * there is no following chunk whose prev_size field could be used.
1998    */
1999   size = (size + SIZE_SZ + page_mask) & ~page_mask;
2000 
2001 #ifdef MAP_ANONYMOUS
2002   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
2003 		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
2004 #else /* !MAP_ANONYMOUS */
2005   if (fd < 0)
2006   {
2007     fd = open("/dev/zero", O_RDWR);
2008     if(fd < 0) return 0;
2009   }
2010   p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
2011 #endif
2012 
2013   if(p == (mchunkptr)-1) return 0;
2014 
2015   n_mmaps++;
2016   if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
2017 
2018   /* We demand that eight bytes into a page must be 8-byte aligned. */
2019   assert(aligned_OK(chunk2mem(p)));
2020 
2021   /* The offset to the start of the mmapped region is stored
2022    * in the prev_size field of the chunk; normally it is zero,
2023    * but that can be changed in memalign().
2024    */
2025   p->prev_size = 0;
2026   set_head(p, size|IS_MMAPPED);
2027 
2028   mmapped_mem += size;
2029   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
2030     max_mmapped_mem = mmapped_mem;
2031   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2032     max_total_mem = mmapped_mem + sbrked_mem;
2033   return p;
2034 }
2035 
2036 #endif /* DEFINE_MALLOC */
2037 
2038 #ifdef SEPARATE_OBJECTS
2039 #define munmap_chunk malloc_munmap_chunk
2040 #endif
2041 
2042 #ifdef DEFINE_FREE
2043 
2044 #if __STD_C
munmap_chunk(mchunkptr p)2045 STATIC void munmap_chunk(mchunkptr p)
2046 #else
2047 STATIC void munmap_chunk(p) mchunkptr p;
2048 #endif
2049 {
2050   INTERNAL_SIZE_T size = chunksize(p);
2051   int ret;
2052 
2053   assert (chunk_is_mmapped(p));
2054   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
2055   assert((n_mmaps > 0));
2056   assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
2057 
2058   n_mmaps--;
2059   mmapped_mem -= (size + p->prev_size);
2060 
2061   ret = munmap((char *)p - p->prev_size, size + p->prev_size);
2062 
2063   /* munmap returns non-zero on failure */
2064   assert(ret == 0);
2065 }
2066 
2067 #else /* ! DEFINE_FREE */
2068 
2069 #if __STD_C
2070 extern void munmap_chunk(mchunkptr);
2071 #else
2072 extern void munmap_chunk();
2073 #endif
2074 
2075 #endif /* ! DEFINE_FREE */
2076 
2077 #if HAVE_MREMAP
2078 
2079 #ifdef DEFINE_REALLOC
2080 
2081 #if __STD_C
mremap_chunk(mchunkptr p,size_t new_size)2082 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
2083 #else
2084 static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
2085 #endif
2086 {
2087   size_t page_mask = malloc_getpagesize - 1;
2088   INTERNAL_SIZE_T offset = p->prev_size;
2089   INTERNAL_SIZE_T size = chunksize(p);
2090   char *cp;
2091 
2092   assert (chunk_is_mmapped(p));
2093   assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
2094   assert((n_mmaps > 0));
2095   assert(((size + offset) & (malloc_getpagesize-1)) == 0);
2096 
2097   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2098   new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2099 
2100   cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
2101 
2102   if (cp == (char *)-1) return 0;
2103 
2104   p = (mchunkptr)(cp + offset);
2105 
2106   assert(aligned_OK(chunk2mem(p)));
2107 
2108   assert((p->prev_size == offset));
2109   set_head(p, (new_size - offset)|IS_MMAPPED);
2110 
2111   mmapped_mem -= size + offset;
2112   mmapped_mem += new_size;
2113   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
2114     max_mmapped_mem = mmapped_mem;
2115   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2116     max_total_mem = mmapped_mem + sbrked_mem;
2117   return p;
2118 }
2119 
2120 #endif /* DEFINE_REALLOC */
2121 
2122 #endif /* HAVE_MREMAP */
2123 
2124 #endif /* HAVE_MMAP */
2125 
2126 
2127 
2128 
2129 #ifdef DEFINE_MALLOC
2130 
2131 /*
2132   Extend the top-most chunk by obtaining memory from system.
2133   Main interface to sbrk (but see also malloc_trim).
2134 */
2135 
2136 #if __STD_C
malloc_extend_top(RARG INTERNAL_SIZE_T nb)2137 static void malloc_extend_top(RARG INTERNAL_SIZE_T nb)
2138 #else
2139 static void malloc_extend_top(RARG nb) RDECL INTERNAL_SIZE_T nb;
2140 #endif
2141 {
2142   char*     brk;                  /* return value from sbrk */
2143   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
2144   INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
2145   int correction_failed = 0;      /* whether we should relax the assertion */
2146   char*     new_brk;              /* return of 2nd sbrk call */
2147   INTERNAL_SIZE_T top_size;       /* new size of top chunk */
2148 
2149   mchunkptr old_top     = top;  /* Record state of old top */
2150   INTERNAL_SIZE_T old_top_size = chunksize(old_top);
2151   char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
2152 
2153   /* Pad request with top_pad plus minimal overhead */
2154 
2155   INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
2156   unsigned long pagesz    = malloc_getpagesize;
2157 
2158   /* If not the first time through, round to preserve page boundary */
2159   /* Otherwise, we need to correct to a page size below anyway. */
2160   /* (We also correct below if an intervening foreign sbrk call.) */
2161 
2162   if (sbrk_base != (char*)(-1))
2163     sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
2164 
2165   brk = (char*)(MORECORE (sbrk_size));
2166 
2167   /* Fail if sbrk failed or if a foreign sbrk call killed our space */
2168   if (brk == (char*)(MORECORE_FAILURE) ||
2169       (brk < old_end && old_top != initial_top))
2170     return;
2171 
2172   sbrked_mem += sbrk_size;
2173 
2174   if (brk == old_end /* can just add bytes to current top, unless
2175 			previous correction failed */
2176       && ((POINTER_UINT)old_end & (pagesz - 1)) == 0)
2177   {
2178     top_size = sbrk_size + old_top_size;
2179     set_head(top, top_size | PREV_INUSE);
2180   }
2181   else
2182   {
2183     if (sbrk_base == (char*)(-1))  /* First time through. Record base */
2184       sbrk_base = brk;
2185     else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
2186       sbrked_mem += brk - (char*)old_end;
2187 
2188     /* Guarantee alignment of first new chunk made from this space */
2189     front_misalign = (POINTER_UINT)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2190     if (front_misalign > 0)
2191     {
2192       correction = (MALLOC_ALIGNMENT) - front_misalign;
2193       brk += correction;
2194     }
2195     else
2196       correction = 0;
2197 
2198     /* Guarantee the next brk will be at a page boundary */
2199     correction += pagesz - ((POINTER_UINT)(brk + sbrk_size) & (pagesz - 1));
2200 
2201     /* Allocate correction */
2202     new_brk = (char*)(MORECORE (correction));
2203     if (new_brk == (char*)(MORECORE_FAILURE))
2204       {
2205 	correction = 0;
2206 	correction_failed = 1;
2207 	new_brk = brk;
2208       }
2209 
2210     sbrked_mem += correction;
2211 
2212     top = (mchunkptr)brk;
2213     top_size = new_brk - brk + correction;
2214     set_head(top, top_size | PREV_INUSE);
2215 
2216     if (old_top != initial_top)
2217     {
2218 
2219       /* There must have been an intervening foreign sbrk call. */
2220       /* A double fencepost is necessary to prevent consolidation */
2221 
2222       /* If not enough space to do this, then user did something very wrong */
2223       if (old_top_size < MINSIZE)
2224       {
2225         set_head(top, PREV_INUSE); /* will force null return from malloc */
2226         return;
2227       }
2228 
2229       /* Also keep size a multiple of MALLOC_ALIGNMENT */
2230       old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2231       set_head_size(old_top, old_top_size);
2232       chunk_at_offset(old_top, old_top_size          )->size =
2233         SIZE_SZ|PREV_INUSE;
2234       chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
2235         SIZE_SZ|PREV_INUSE;
2236       /* If possible, release the rest. */
2237       if (old_top_size >= MINSIZE)
2238         fREe(RCALL chunk2mem(old_top));
2239     }
2240   }
2241 
2242   if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
2243     max_sbrked_mem = sbrked_mem;
2244 #if HAVE_MMAP
2245   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2246     max_total_mem = mmapped_mem + sbrked_mem;
2247 #else
2248   if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem)
2249     max_total_mem = sbrked_mem;
2250 #endif
2251 
2252   /* We always land on a page boundary */
2253   assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0
2254 	 || correction_failed);
2255 }
2256 
2257 #endif /* DEFINE_MALLOC */
2258 
2259 
2260 /* Main public routines */
2261 
2262 #ifdef DEFINE_MALLOC
2263 
2264 /*
2265   Malloc Algorthim:
2266 
2267     The requested size is first converted into a usable form, `nb'.
2268     This currently means to add 4 bytes overhead plus possibly more to
2269     obtain 8-byte alignment and/or to obtain a size of at least
2270     MINSIZE (currently 16 bytes), the smallest allocatable size.
2271     (All fits are considered `exact' if they are within MINSIZE bytes.)
2272 
2273     From there, the first successful of the following steps is taken:
2274 
2275       1. The bin corresponding to the request size is scanned, and if
2276          a chunk of exactly the right size is found, it is taken.
2277 
2278       2. The most recently remaindered chunk is used if it is big
2279          enough.  This is a form of (roving) first fit, used only in
2280          the absence of exact fits. Runs of consecutive requests use
2281          the remainder of the chunk used for the previous such request
2282          whenever possible. This limited use of a first-fit style
2283          allocation strategy tends to give contiguous chunks
2284          coextensive lifetimes, which improves locality and can reduce
2285          fragmentation in the long run.
2286 
2287       3. Other bins are scanned in increasing size order, using a
2288          chunk big enough to fulfill the request, and splitting off
2289          any remainder.  This search is strictly by best-fit; i.e.,
2290          the smallest (with ties going to approximately the least
2291          recently used) chunk that fits is selected.
2292 
2293       4. If large enough, the chunk bordering the end of memory
2294          (`top') is split off. (This use of `top' is in accord with
2295          the best-fit search rule.  In effect, `top' is treated as
2296          larger (and thus less well fitting) than any other available
2297          chunk since it can be extended to be as large as necessary
2298          (up to system limitations).
2299 
2300       5. If the request size meets the mmap threshold and the
2301          system supports mmap, and there are few enough currently
2302          allocated mmapped regions, and a call to mmap succeeds,
2303          the request is allocated via direct memory mapping.
2304 
2305       6. Otherwise, the top of memory is extended by
2306          obtaining more space from the system (normally using sbrk,
2307          but definable to anything else via the MORECORE macro).
2308          Memory is gathered from the system (in system page-sized
2309          units) in a way that allows chunks obtained across different
2310          sbrk calls to be consolidated, but does not require
2311          contiguous memory. Thus, it should be safe to intersperse
2312          mallocs with other sbrk calls.
2313 
2314 
2315       All allocations are made from the the `lowest' part of any found
2316       chunk. (The implementation invariant is that prev_inuse is
2317       always true of any allocated chunk; i.e., that each allocated
2318       chunk borders either a previously allocated and still in-use chunk,
2319       or the base of its memory arena.)
2320 
2321 */
2322 
2323 #if __STD_C
mALLOc(RARG size_t bytes)2324 Void_t* mALLOc(RARG size_t bytes)
2325 #else
2326 Void_t* mALLOc(RARG bytes) RDECL size_t bytes;
2327 #endif
2328 {
2329 #ifdef MALLOC_PROVIDED
2330 
2331   return malloc (bytes); // Make sure that the pointer returned by malloc is returned back.
2332 
2333 #else
2334 
2335   mchunkptr victim;                  /* inspected/selected chunk */
2336   INTERNAL_SIZE_T victim_size;       /* its size */
2337   int       idx;                     /* index for bin traversal */
2338   mbinptr   bin;                     /* associated bin */
2339   mchunkptr remainder;               /* remainder from a split */
2340   long      remainder_size;          /* its size */
2341   int       remainder_index;         /* its bin index */
2342   unsigned long block;               /* block traverser bit */
2343   int       startidx;                /* first bin of a traversed block */
2344   mchunkptr fwd;                     /* misc temp for linking */
2345   mchunkptr bck;                     /* misc temp for linking */
2346   mbinptr q;                         /* misc temp */
2347 
2348   INTERNAL_SIZE_T nb  = request2size(bytes);  /* padded request size; */
2349 
2350   /* Check for overflow and just fail, if so. */
2351   if (nb > INT_MAX || nb < bytes)
2352   {
2353     RERRNO = ENOMEM;
2354     return 0;
2355   }
2356 
2357   MALLOC_LOCK;
2358 
2359   /* Check for exact match in a bin */
2360 
2361   if (is_small_request(nb))  /* Faster version for small requests */
2362   {
2363     idx = smallbin_index(nb);
2364 
2365     /* No traversal or size check necessary for small bins.  */
2366 
2367     q = bin_at(idx);
2368     victim = last(q);
2369 
2370 #if MALLOC_ALIGN != 16
2371     /* Also scan the next one, since it would have a remainder < MINSIZE */
2372     if (victim == q)
2373     {
2374       q = next_bin(q);
2375       victim = last(q);
2376     }
2377 #endif
2378     if (victim != q)
2379     {
2380       victim_size = chunksize(victim);
2381       unlink(victim, bck, fwd);
2382       set_inuse_bit_at_offset(victim, victim_size);
2383       check_malloced_chunk(victim, nb);
2384       MALLOC_UNLOCK;
2385       return chunk2mem(victim);
2386     }
2387 
2388     idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
2389 
2390   }
2391   else
2392   {
2393     idx = bin_index(nb);
2394     bin = bin_at(idx);
2395 
2396     for (victim = last(bin); victim != bin; victim = victim->bk)
2397     {
2398       victim_size = chunksize(victim);
2399       remainder_size = long_sub_size_t(victim_size, nb);
2400 
2401       if (remainder_size >= (long)MINSIZE) /* too big */
2402       {
2403         --idx; /* adjust to rescan below after checking last remainder */
2404         break;
2405       }
2406 
2407       else if (remainder_size >= 0) /* exact fit */
2408       {
2409         unlink(victim, bck, fwd);
2410         set_inuse_bit_at_offset(victim, victim_size);
2411         check_malloced_chunk(victim, nb);
2412 	MALLOC_UNLOCK;
2413         return chunk2mem(victim);
2414       }
2415     }
2416 
2417     ++idx;
2418 
2419   }
2420 
2421   /* Try to use the last split-off remainder */
2422 
2423   if ( (victim = last_remainder->fd) != last_remainder)
2424   {
2425     victim_size = chunksize(victim);
2426     remainder_size = long_sub_size_t(victim_size, nb);
2427 
2428     if (remainder_size >= (long)MINSIZE) /* re-split */
2429     {
2430       remainder = chunk_at_offset(victim, nb);
2431       set_head(victim, nb | PREV_INUSE);
2432       link_last_remainder(remainder);
2433       set_head(remainder, remainder_size | PREV_INUSE);
2434       set_foot(remainder, remainder_size);
2435       check_malloced_chunk(victim, nb);
2436       MALLOC_UNLOCK;
2437       return chunk2mem(victim);
2438     }
2439 
2440     clear_last_remainder;
2441 
2442     if (remainder_size >= 0)  /* exhaust */
2443     {
2444       set_inuse_bit_at_offset(victim, victim_size);
2445       check_malloced_chunk(victim, nb);
2446       MALLOC_UNLOCK;
2447       return chunk2mem(victim);
2448     }
2449 
2450     /* Else place in bin */
2451 
2452     frontlink(victim, victim_size, remainder_index, bck, fwd);
2453   }
2454 
2455   /*
2456      If there are any possibly nonempty big-enough blocks,
2457      search for best fitting chunk by scanning bins in blockwidth units.
2458   */
2459 
2460   if ( (block = idx2binblock(idx)) <= binblocks)
2461   {
2462 
2463     /* Get to the first marked block */
2464 
2465     if ( (block & binblocks) == 0)
2466     {
2467       /* force to an even block boundary */
2468       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
2469       block <<= 1;
2470       while ((block & binblocks) == 0)
2471       {
2472         idx += BINBLOCKWIDTH;
2473         block <<= 1;
2474       }
2475     }
2476 
2477     /* For each possibly nonempty block ... */
2478     for (;;)
2479     {
2480       startidx = idx;          /* (track incomplete blocks) */
2481       q = bin = bin_at(idx);
2482 
2483       /* For each bin in this block ... */
2484       do
2485       {
2486         /* Find and use first big enough chunk ... */
2487 
2488         for (victim = last(bin); victim != bin; victim = victim->bk)
2489         {
2490           victim_size = chunksize(victim);
2491           remainder_size = long_sub_size_t(victim_size, nb);
2492 
2493           if (remainder_size >= (long)MINSIZE) /* split */
2494           {
2495             remainder = chunk_at_offset(victim, nb);
2496             set_head(victim, nb | PREV_INUSE);
2497             unlink(victim, bck, fwd);
2498             link_last_remainder(remainder);
2499             set_head(remainder, remainder_size | PREV_INUSE);
2500             set_foot(remainder, remainder_size);
2501             check_malloced_chunk(victim, nb);
2502 	    MALLOC_UNLOCK;
2503             return chunk2mem(victim);
2504           }
2505 
2506           else if (remainder_size >= 0)  /* take */
2507           {
2508             set_inuse_bit_at_offset(victim, victim_size);
2509             unlink(victim, bck, fwd);
2510             check_malloced_chunk(victim, nb);
2511 	    MALLOC_UNLOCK;
2512             return chunk2mem(victim);
2513           }
2514 
2515         }
2516 
2517        bin = next_bin(bin);
2518 
2519 #if MALLOC_ALIGN == 16
2520        if (idx < MAX_SMALLBIN)
2521          {
2522            bin = next_bin(bin);
2523            ++idx;
2524          }
2525 #endif
2526       } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
2527 
2528       /* Clear out the block bit. */
2529 
2530       do   /* Possibly backtrack to try to clear a partial block */
2531       {
2532         if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
2533         {
2534           binblocks &= ~block;
2535           break;
2536         }
2537         --startidx;
2538        q = prev_bin(q);
2539       } while (first(q) == q);
2540 
2541       /* Get to the next possibly nonempty block */
2542 
2543       if ( (block <<= 1) <= binblocks && (block != 0) )
2544       {
2545         while ((block & binblocks) == 0)
2546         {
2547           idx += BINBLOCKWIDTH;
2548           block <<= 1;
2549         }
2550       }
2551       else
2552         break;
2553     }
2554   }
2555 
2556 
2557   /* Try to use top chunk */
2558 
2559   /* Require that there be a remainder, ensuring top always exists  */
2560   remainder_size = long_sub_size_t(chunksize(top), nb);
2561   if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2562   {
2563 
2564 #if HAVE_MMAP
2565     /* If big and would otherwise need to extend, try to use mmap instead */
2566     if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
2567         (victim = mmap_chunk(nb)) != 0)
2568     {
2569       MALLOC_UNLOCK;
2570       return chunk2mem(victim);
2571     }
2572 #endif
2573 
2574     /* Try to extend */
2575     malloc_extend_top(RCALL nb);
2576     remainder_size = long_sub_size_t(chunksize(top), nb);
2577     if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
2578     {
2579       MALLOC_UNLOCK;
2580       return 0; /* propagate failure */
2581     }
2582   }
2583 
2584   victim = top;
2585   set_head(victim, nb | PREV_INUSE);
2586   top = chunk_at_offset(victim, nb);
2587   set_head(top, remainder_size | PREV_INUSE);
2588   check_malloced_chunk(victim, nb);
2589   MALLOC_UNLOCK;
2590   return chunk2mem(victim);
2591 
2592 #endif /* MALLOC_PROVIDED */
2593 }
2594 
2595 #endif /* DEFINE_MALLOC */
2596 
2597 #ifdef DEFINE_FREE
2598 
2599 /*
2600 
2601   free() algorithm :
2602 
2603     cases:
2604 
2605        1. free(0) has no effect.
2606 
2607        2. If the chunk was allocated via mmap, it is release via munmap().
2608 
2609        3. If a returned chunk borders the current high end of memory,
2610           it is consolidated into the top, and if the total unused
2611           topmost memory exceeds the trim threshold, malloc_trim is
2612           called.
2613 
2614        4. Other chunks are consolidated as they arrive, and
2615           placed in corresponding bins. (This includes the case of
2616           consolidating with the current `last_remainder').
2617 
2618 */
2619 
2620 
2621 #if __STD_C
fREe(RARG Void_t * mem)2622 void fREe(RARG Void_t* mem)
2623 #else
2624 void fREe(RARG mem) RDECL Void_t* mem;
2625 #endif
2626 {
2627 #ifdef MALLOC_PROVIDED
2628 
2629   free (mem);
2630 
2631 #else
2632 
2633   mchunkptr p;         /* chunk corresponding to mem */
2634   INTERNAL_SIZE_T hd;  /* its head field */
2635   INTERNAL_SIZE_T sz;  /* its size */
2636   int       idx;       /* its bin index */
2637   mchunkptr next;      /* next contiguous chunk */
2638   INTERNAL_SIZE_T nextsz; /* its size */
2639   INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
2640   mchunkptr bck;       /* misc temp for linking */
2641   mchunkptr fwd;       /* misc temp for linking */
2642   int       islr;      /* track whether merging with last_remainder */
2643 
2644   if (mem == 0)                              /* free(0) has no effect */
2645     return;
2646 
2647   MALLOC_LOCK;
2648 
2649   p = mem2chunk(mem);
2650   hd = p->size;
2651 
2652 #if HAVE_MMAP
2653   if (hd & IS_MMAPPED)                       /* release mmapped memory. */
2654   {
2655     munmap_chunk(p);
2656     MALLOC_UNLOCK;
2657     return;
2658   }
2659 #endif
2660 
2661   check_inuse_chunk(p);
2662 
2663   sz = hd & ~PREV_INUSE;
2664   next = chunk_at_offset(p, sz);
2665   nextsz = chunksize(next);
2666 
2667   if (next == top)                            /* merge with top */
2668   {
2669     sz += nextsz;
2670 
2671     if (!(hd & PREV_INUSE))                    /* consolidate backward */
2672     {
2673       prevsz = p->prev_size;
2674       p = chunk_at_offset(p, -prevsz);
2675       sz += prevsz;
2676       unlink(p, bck, fwd);
2677     }
2678 
2679     set_head(p, sz | PREV_INUSE);
2680     top = p;
2681     if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
2682       malloc_trim(RCALL top_pad);
2683     MALLOC_UNLOCK;
2684     return;
2685   }
2686 
2687   set_head(next, nextsz);                    /* clear inuse bit */
2688 
2689   islr = 0;
2690 
2691   if (!(hd & PREV_INUSE))                    /* consolidate backward */
2692   {
2693     prevsz = p->prev_size;
2694     p = chunk_at_offset(p, -prevsz);
2695     sz += prevsz;
2696 
2697     if (p->fd == last_remainder)             /* keep as last_remainder */
2698       islr = 1;
2699     else
2700       unlink(p, bck, fwd);
2701   }
2702 
2703   if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
2704   {
2705     sz += nextsz;
2706 
2707     if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
2708     {
2709       islr = 1;
2710       link_last_remainder(p);
2711     }
2712     else
2713       unlink(next, bck, fwd);
2714   }
2715 
2716 
2717   set_head(p, sz | PREV_INUSE);
2718   set_foot(p, sz);
2719   if (!islr)
2720     frontlink(p, sz, idx, bck, fwd);
2721 
2722   MALLOC_UNLOCK;
2723 
2724 #endif /* MALLOC_PROVIDED */
2725 }
2726 
2727 #endif /* DEFINE_FREE */
2728 
2729 #ifdef DEFINE_REALLOC
2730 
2731 /*
2732 
2733   Realloc algorithm:
2734 
2735     Chunks that were obtained via mmap cannot be extended or shrunk
2736     unless HAVE_MREMAP is defined, in which case mremap is used.
2737     Otherwise, if their reallocation is for additional space, they are
2738     copied.  If for less, they are just left alone.
2739 
2740     Otherwise, if the reallocation is for additional space, and the
2741     chunk can be extended, it is, else a malloc-copy-free sequence is
2742     taken.  There are several different ways that a chunk could be
2743     extended. All are tried:
2744 
2745        * Extending forward into following adjacent free chunk.
2746        * Shifting backwards, joining preceding adjacent space
2747        * Both shifting backwards and extending forward.
2748        * Extending into newly sbrked space
2749 
2750     Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
2751     size argument of zero (re)allocates a minimum-sized chunk.
2752 
2753     If the reallocation is for less space, and the new request is for
2754     a `small' (<512 bytes) size, then the newly unused space is lopped
2755     off and freed.
2756 
2757     The old unix realloc convention of allowing the last-free'd chunk
2758     to be used as an argument to realloc is no longer supported.
2759     I don't know of any programs still relying on this feature,
2760     and allowing it would also allow too many other incorrect
2761     usages of realloc to be sensible.
2762 
2763 
2764 */
2765 
2766 
2767 #if __STD_C
rEALLOc(RARG Void_t * oldmem,size_t bytes)2768 Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes)
2769 #else
2770 Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
2771 #endif
2772 {
2773 #ifdef MALLOC_PROVIDED
2774 
2775   realloc (oldmem, bytes);
2776 
2777 #else
2778 
2779   INTERNAL_SIZE_T    nb;      /* padded request size */
2780 
2781   mchunkptr oldp;             /* chunk corresponding to oldmem */
2782   INTERNAL_SIZE_T    oldsize; /* its size */
2783 
2784   mchunkptr newp;             /* chunk to return */
2785   INTERNAL_SIZE_T    newsize; /* its size */
2786   Void_t*   newmem;           /* corresponding user mem */
2787 
2788   mchunkptr next;             /* next contiguous chunk after oldp */
2789   INTERNAL_SIZE_T  nextsize;  /* its size */
2790 
2791   mchunkptr prev;             /* previous contiguous chunk before oldp */
2792   INTERNAL_SIZE_T  prevsize;  /* its size */
2793 
2794   mchunkptr remainder;        /* holds split off extra space from newp */
2795   INTERNAL_SIZE_T  remainder_size;   /* its size */
2796 
2797   mchunkptr bck;              /* misc temp for linking */
2798   mchunkptr fwd;              /* misc temp for linking */
2799 
2800 #ifdef REALLOC_ZERO_BYTES_FREES
2801   if (bytes == 0) { fREe(RCALL oldmem); return 0; }
2802 #endif
2803 
2804 
2805   /* realloc of null is supposed to be same as malloc */
2806   if (oldmem == 0) return mALLOc(RCALL bytes);
2807 
2808   MALLOC_LOCK;
2809 
2810   newp    = oldp    = mem2chunk(oldmem);
2811   newsize = oldsize = chunksize(oldp);
2812 
2813 
2814   nb = request2size(bytes);
2815 
2816   /* Check for overflow and just fail, if so. */
2817   if (nb > INT_MAX || nb < bytes)
2818   {
2819     RERRNO = ENOMEM;
2820     return 0;
2821   }
2822 
2823 #if HAVE_MMAP
2824   if (chunk_is_mmapped(oldp))
2825   {
2826 #if HAVE_MREMAP
2827     newp = mremap_chunk(oldp, nb);
2828     if(newp)
2829     {
2830       MALLOC_UNLOCK;
2831       return chunk2mem(newp);
2832     }
2833 #endif
2834     /* Note the extra SIZE_SZ overhead. */
2835     if(oldsize - SIZE_SZ >= nb)
2836     {
2837       MALLOC_UNLOCK;
2838       return oldmem; /* do nothing */
2839     }
2840     /* Must alloc, copy, free. */
2841     newmem = mALLOc(RCALL bytes);
2842     if (newmem == 0)
2843     {
2844       MALLOC_UNLOCK;
2845       return 0; /* propagate failure */
2846     }
2847     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2848     munmap_chunk(oldp);
2849     MALLOC_UNLOCK;
2850     return newmem;
2851   }
2852 #endif
2853 
2854   check_inuse_chunk(oldp);
2855 
2856   if ((long)(oldsize) < (long)(nb))
2857   {
2858 
2859     /* Try expanding forward */
2860 
2861     next = chunk_at_offset(oldp, oldsize);
2862     if (next == top || !inuse(next))
2863     {
2864       nextsize = chunksize(next);
2865 
2866       /* Forward into top only if a remainder */
2867       if (next == top)
2868       {
2869         if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
2870         {
2871           newsize += nextsize;
2872           top = chunk_at_offset(oldp, nb);
2873           set_head(top, (newsize - nb) | PREV_INUSE);
2874           set_head_size(oldp, nb);
2875 	  MALLOC_UNLOCK;
2876           return chunk2mem(oldp);
2877         }
2878       }
2879 
2880       /* Forward into next chunk */
2881       else if (((long)(nextsize + newsize) >= (long)(nb)))
2882       {
2883         unlink(next, bck, fwd);
2884         newsize  += nextsize;
2885         goto split;
2886       }
2887     }
2888     else
2889     {
2890       next = 0;
2891       nextsize = 0;
2892     }
2893 
2894     /* Try shifting backwards. */
2895 
2896     if (!prev_inuse(oldp))
2897     {
2898       prev = prev_chunk(oldp);
2899       prevsize = chunksize(prev);
2900 
2901       /* try forward + backward first to save a later consolidation */
2902 
2903       if (next != 0)
2904       {
2905         /* into top */
2906         if (next == top)
2907         {
2908           if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
2909           {
2910             unlink(prev, bck, fwd);
2911             newp = prev;
2912             newsize += prevsize + nextsize;
2913             newmem = chunk2mem(newp);
2914             MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2915             top = chunk_at_offset(newp, nb);
2916             set_head(top, (newsize - nb) | PREV_INUSE);
2917             set_head_size(newp, nb);
2918 	    MALLOC_UNLOCK;
2919             return newmem;
2920           }
2921         }
2922 
2923         /* into next chunk */
2924         else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
2925         {
2926           unlink(next, bck, fwd);
2927           unlink(prev, bck, fwd);
2928           newp = prev;
2929           newsize += nextsize + prevsize;
2930           newmem = chunk2mem(newp);
2931           MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2932           goto split;
2933         }
2934       }
2935 
2936       /* backward only */
2937       if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
2938       {
2939         unlink(prev, bck, fwd);
2940         newp = prev;
2941         newsize += prevsize;
2942         newmem = chunk2mem(newp);
2943         MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2944         goto split;
2945       }
2946     }
2947 
2948     /* Must allocate */
2949 
2950     newmem = mALLOc (RCALL bytes);
2951 
2952     if (newmem == 0)  /* propagate failure */
2953     {
2954       MALLOC_UNLOCK;
2955       return 0;
2956     }
2957 
2958     /* Avoid copy if newp is next chunk after oldp. */
2959     /* (This can only happen when new chunk is sbrk'ed.) */
2960 
2961     if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
2962     {
2963       newsize += chunksize(newp);
2964       newp = oldp;
2965       goto split;
2966     }
2967 
2968     /* Otherwise copy, free, and exit */
2969     MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2970     fREe(RCALL oldmem);
2971     MALLOC_UNLOCK;
2972     return newmem;
2973   }
2974 
2975 
2976  split:  /* split off extra room in old or expanded chunk */
2977 
2978   remainder_size = long_sub_size_t(newsize, nb);
2979 
2980   if (remainder_size >= (long)MINSIZE) /* split off remainder */
2981   {
2982     remainder = chunk_at_offset(newp, nb);
2983     set_head_size(newp, nb);
2984     set_head(remainder, remainder_size | PREV_INUSE);
2985     set_inuse_bit_at_offset(remainder, remainder_size);
2986     fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */
2987   }
2988   else
2989   {
2990     set_head_size(newp, newsize);
2991     set_inuse_bit_at_offset(newp, newsize);
2992   }
2993 
2994   check_inuse_chunk(newp);
2995   MALLOC_UNLOCK;
2996   return chunk2mem(newp);
2997 
2998 #endif /* MALLOC_PROVIDED */
2999 }
3000 
3001 #endif /* DEFINE_REALLOC */
3002 
3003 #ifdef DEFINE_MEMALIGN
3004 
3005 /*
3006 
3007   memalign algorithm:
3008 
3009     memalign requests more than enough space from malloc, finds a spot
3010     within that chunk that meets the alignment request, and then
3011     possibly frees the leading and trailing space.
3012 
3013     The alignment argument must be a power of two. This property is not
3014     checked by memalign, so misuse may result in random runtime errors.
3015 
3016     8-byte alignment is guaranteed by normal malloc calls, so don't
3017     bother calling memalign with an argument of 8 or less.
3018 
3019     Overreliance on memalign is a sure way to fragment space.
3020 
3021 */
3022 
3023 
3024 #if __STD_C
mEMALIGn(RARG size_t alignment,size_t bytes)3025 Void_t* mEMALIGn(RARG size_t alignment, size_t bytes)
3026 #else
3027 Void_t* mEMALIGn(RARG alignment, bytes) RDECL size_t alignment; size_t bytes;
3028 #endif
3029 {
3030   INTERNAL_SIZE_T    nb;      /* padded  request size */
3031   char*     m;                /* memory returned by malloc call */
3032   mchunkptr p;                /* corresponding chunk */
3033   char*     brk;              /* alignment point within p */
3034   mchunkptr newp;             /* chunk to return */
3035   INTERNAL_SIZE_T  newsize;   /* its size */
3036   INTERNAL_SIZE_T  leadsize;  /* leading space befor alignment point */
3037   mchunkptr remainder;        /* spare room at end to split off */
3038   long      remainder_size;   /* its size */
3039 
3040   /* If need less alignment than we give anyway, just relay to malloc */
3041 
3042   if (alignment <= MALLOC_ALIGNMENT) return mALLOc(RCALL bytes);
3043 
3044   /* Otherwise, ensure that it is at least a minimum chunk size */
3045 
3046   if (alignment <  MINSIZE) alignment = MINSIZE;
3047 
3048   /* Call malloc with worst case padding to hit alignment. */
3049 
3050   nb = request2size(bytes);
3051 
3052   /* Check for overflow. */
3053   if (nb > INT_MAX || nb < bytes)
3054   {
3055     RERRNO = ENOMEM;
3056     return 0;
3057   }
3058 
3059   m  = (char*)(mALLOc(RCALL nb + alignment + MINSIZE));
3060 
3061   if (m == 0) return 0; /* propagate failure */
3062 
3063   MALLOC_LOCK;
3064 
3065   p = mem2chunk(m);
3066 
3067   if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
3068   {
3069 #if HAVE_MMAP
3070     if(chunk_is_mmapped(p))
3071     {
3072       MALLOC_UNLOCK;
3073       return chunk2mem(p); /* nothing more to do */
3074     }
3075 #endif
3076   }
3077   else /* misaligned */
3078   {
3079     /*
3080       Find an aligned spot inside chunk.
3081       Since we need to give back leading space in a chunk of at
3082       least MINSIZE, if the first calculation places us at
3083       a spot with less than MINSIZE leader, we can move to the
3084       next aligned spot -- we've allocated enough total room so that
3085       this is always possible.
3086     */
3087 
3088     brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment);
3089     if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment;
3090 
3091     newp = (mchunkptr)brk;
3092     leadsize = brk - (char*)(p);
3093     newsize = chunksize(p) - leadsize;
3094 
3095 #if HAVE_MMAP
3096     if(chunk_is_mmapped(p))
3097     {
3098       newp->prev_size = p->prev_size + leadsize;
3099       set_head(newp, newsize|IS_MMAPPED);
3100       MALLOC_UNLOCK;
3101       return chunk2mem(newp);
3102     }
3103 #endif
3104 
3105     /* give back leader, use the rest */
3106 
3107     set_head(newp, newsize | PREV_INUSE);
3108     set_inuse_bit_at_offset(newp, newsize);
3109     set_head_size(p, leadsize);
3110     fREe(RCALL chunk2mem(p));
3111     p = newp;
3112 
3113     assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
3114   }
3115 
3116   /* Also give back spare room at the end */
3117 
3118   remainder_size = long_sub_size_t(chunksize(p), nb);
3119 
3120   if (remainder_size >= (long)MINSIZE)
3121   {
3122     remainder = chunk_at_offset(p, nb);
3123     set_head(remainder, remainder_size | PREV_INUSE);
3124     set_head_size(p, nb);
3125     fREe(RCALL chunk2mem(remainder));
3126   }
3127 
3128   check_inuse_chunk(p);
3129   MALLOC_UNLOCK;
3130   return chunk2mem(p);
3131 
3132 }
3133 
3134 #endif /* DEFINE_MEMALIGN */
3135 
3136 #ifdef DEFINE_VALLOC
3137 
3138 /*
3139     valloc just invokes memalign with alignment argument equal
3140     to the page size of the system (or as near to this as can
3141     be figured out from all the includes/defines above.)
3142 */
3143 
3144 #if __STD_C
vALLOc(RARG size_t bytes)3145 Void_t* vALLOc(RARG size_t bytes)
3146 #else
3147 Void_t* vALLOc(RARG bytes) RDECL size_t bytes;
3148 #endif
3149 {
3150   return mEMALIGn (RCALL malloc_getpagesize, bytes);
3151 }
3152 
3153 #endif /* DEFINE_VALLOC */
3154 
3155 #ifdef DEFINE_PVALLOC
3156 
3157 /*
3158   pvalloc just invokes valloc for the nearest pagesize
3159   that will accommodate request
3160 */
3161 
3162 
3163 #if __STD_C
pvALLOc(RARG size_t bytes)3164 Void_t* pvALLOc(RARG size_t bytes)
3165 #else
3166 Void_t* pvALLOc(RARG bytes) RDECL size_t bytes;
3167 #endif
3168 {
3169   size_t pagesize = malloc_getpagesize;
3170   return mEMALIGn (RCALL pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
3171 }
3172 
3173 #endif /* DEFINE_PVALLOC */
3174 
3175 #ifdef DEFINE_CALLOC
3176 
3177 /*
3178 
3179   calloc calls malloc, then zeroes out the allocated chunk.
3180 
3181 */
3182 
3183 #if __STD_C
cALLOc(RARG size_t n,size_t elem_size)3184 Void_t* cALLOc(RARG size_t n, size_t elem_size)
3185 #else
3186 Void_t* cALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size;
3187 #endif
3188 {
3189   mchunkptr p;
3190   INTERNAL_SIZE_T csz;
3191 
3192   INTERNAL_SIZE_T sz = n * elem_size;
3193 
3194 #if MORECORE_CLEARS
3195   mchunkptr oldtop;
3196   INTERNAL_SIZE_T oldtopsize;
3197 #endif
3198   Void_t* mem;
3199 
3200   /* check if expand_top called, in which case don't need to clear */
3201 #if MORECORE_CLEARS
3202   MALLOC_LOCK;
3203   oldtop = top;
3204   oldtopsize = chunksize(top);
3205 #endif
3206 
3207   mem = mALLOc (RCALL sz);
3208 
3209   if (mem == 0)
3210   {
3211 #if MORECORE_CLEARS
3212     MALLOC_UNLOCK;
3213 #endif
3214     return 0;
3215   }
3216   else
3217   {
3218     p = mem2chunk(mem);
3219 
3220     /* Two optional cases in which clearing not necessary */
3221 
3222 
3223 #if HAVE_MMAP
3224     if (chunk_is_mmapped(p))
3225     {
3226 #if MORECORE_CLEARS
3227       MALLOC_UNLOCK;
3228 #endif
3229       return mem;
3230     }
3231 #endif
3232 
3233     csz = chunksize(p);
3234 
3235 #if MORECORE_CLEARS
3236     if (p == oldtop && csz > oldtopsize)
3237     {
3238       /* clear only the bytes from non-freshly-sbrked memory */
3239       csz = oldtopsize;
3240     }
3241     MALLOC_UNLOCK;
3242 #endif
3243 
3244     MALLOC_ZERO(mem, csz - SIZE_SZ);
3245     return mem;
3246   }
3247 }
3248 
3249 #endif /* DEFINE_CALLOC */
3250 
3251 #if defined(DEFINE_CFREE) && !defined(__CYGWIN__)
3252 
3253 /*
3254 
3255   cfree just calls free. It is needed/defined on some systems
3256   that pair it with calloc, presumably for odd historical reasons.
3257 
3258 */
3259 
3260 #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
3261 #if !defined(INTERNAL_NEWLIB) || !defined(_REENT_ONLY)
3262 #if __STD_C
cfree(Void_t * mem)3263 void cfree(Void_t *mem)
3264 #else
3265 void cfree(mem) Void_t *mem;
3266 #endif
3267 {
3268 #ifdef INTERNAL_NEWLIB
3269   fREe(_REENT, mem);
3270 #else
3271   fREe(mem);
3272 #endif
3273 }
3274 #endif
3275 #endif
3276 
3277 #endif /* DEFINE_CFREE */
3278 
3279 #ifdef DEFINE_FREE
3280 
3281 /*
3282 
3283     Malloc_trim gives memory back to the system (via negative
3284     arguments to sbrk) if there is unused memory at the `high' end of
3285     the malloc pool. You can call this after freeing large blocks of
3286     memory to potentially reduce the system-level memory requirements
3287     of a program. However, it cannot guarantee to reduce memory. Under
3288     some allocation patterns, some large free blocks of memory will be
3289     locked between two used chunks, so they cannot be given back to
3290     the system.
3291 
3292     The `pad' argument to malloc_trim represents the amount of free
3293     trailing space to leave untrimmed. If this argument is zero,
3294     only the minimum amount of memory to maintain internal data
3295     structures will be left (one page or less). Non-zero arguments
3296     can be supplied to maintain enough trailing space to service
3297     future expected allocations without having to re-obtain memory
3298     from the system.
3299 
3300     Malloc_trim returns 1 if it actually released any memory, else 0.
3301 
3302 */
3303 
3304 #if __STD_C
malloc_trim(RARG size_t pad)3305 int malloc_trim(RARG size_t pad)
3306 #else
3307 int malloc_trim(RARG pad) RDECL size_t pad;
3308 #endif
3309 {
3310   long  top_size;        /* Amount of top-most memory */
3311   long  extra;           /* Amount to release */
3312   char* current_brk;     /* address returned by pre-check sbrk call */
3313   char* new_brk;         /* address returned by negative sbrk call */
3314 
3315   unsigned long pagesz = malloc_getpagesize;
3316 
3317   MALLOC_LOCK;
3318 
3319   top_size = chunksize(top);
3320   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3321 
3322   if (extra < (long)pagesz)  /* Not enough memory to release */
3323   {
3324     MALLOC_UNLOCK;
3325     return 0;
3326   }
3327 
3328   else
3329   {
3330     /* Test to make sure no one else called sbrk */
3331     current_brk = (char*)(MORECORE (0));
3332     if (current_brk != (char*)(top) + top_size)
3333     {
3334       MALLOC_UNLOCK;
3335       return 0;     /* Apparently we don't own memory; must fail */
3336     }
3337 
3338     else
3339     {
3340       new_brk = (char*)(MORECORE (-extra));
3341 
3342       if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
3343       {
3344         /* Try to figure out what we have */
3345         current_brk = (char*)(MORECORE (0));
3346         top_size = current_brk - (char*)top;
3347         if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
3348         {
3349           sbrked_mem = current_brk - sbrk_base;
3350           set_head(top, top_size | PREV_INUSE);
3351         }
3352         check_chunk(top);
3353 	MALLOC_UNLOCK;
3354         return 0;
3355       }
3356 
3357       else
3358       {
3359         /* Success. Adjust top accordingly. */
3360         set_head(top, (top_size - extra) | PREV_INUSE);
3361         sbrked_mem -= extra;
3362         check_chunk(top);
3363 	MALLOC_UNLOCK;
3364         return 1;
3365       }
3366     }
3367   }
3368 }
3369 
3370 #endif /* DEFINE_FREE */
3371 
3372 #ifdef DEFINE_MALLOC_USABLE_SIZE
3373 
3374 /*
3375   malloc_usable_size:
3376 
3377     This routine tells you how many bytes you can actually use in an
3378     allocated chunk, which may be more than you requested (although
3379     often not). You can use this many bytes without worrying about
3380     overwriting other allocated objects. Not a particularly great
3381     programming practice, but still sometimes useful.
3382 
3383 */
3384 
3385 #if __STD_C
malloc_usable_size(RARG Void_t * mem)3386 size_t malloc_usable_size(RARG Void_t* mem)
3387 #else
3388 size_t malloc_usable_size(RARG mem) RDECL Void_t* mem;
3389 #endif
3390 {
3391   mchunkptr p;
3392   if (mem == 0)
3393     return 0;
3394   else
3395   {
3396     p = mem2chunk(mem);
3397     if(!chunk_is_mmapped(p))
3398     {
3399       if (!inuse(p)) return 0;
3400 #if DEBUG
3401       MALLOC_LOCK;
3402       check_inuse_chunk(p);
3403       MALLOC_UNLOCK;
3404 #endif
3405       return chunksize(p) - SIZE_SZ;
3406     }
3407     return chunksize(p) - 2*SIZE_SZ;
3408   }
3409 }
3410 
3411 #endif /* DEFINE_MALLOC_USABLE_SIZE */
3412 
3413 #ifdef DEFINE_MALLINFO
3414 
3415 /* Utility to update current_mallinfo for malloc_stats and mallinfo() */
3416 
malloc_update_mallinfo()3417 STATIC void malloc_update_mallinfo()
3418 {
3419   int i;
3420   mbinptr b;
3421   mchunkptr p;
3422 #if DEBUG
3423   mchunkptr q;
3424 #endif
3425 
3426   INTERNAL_SIZE_T avail = chunksize(top);
3427   int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
3428 
3429   for (i = 1; i < NAV; ++i)
3430   {
3431     b = bin_at(i);
3432     for (p = last(b); p != b; p = p->bk)
3433     {
3434 #if DEBUG
3435       check_free_chunk(p);
3436       for (q = next_chunk(p);
3437            q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
3438            q = next_chunk(q))
3439         check_inuse_chunk(q);
3440 #endif
3441       avail += chunksize(p);
3442       navail++;
3443     }
3444   }
3445 
3446   current_mallinfo.ordblks = navail;
3447   current_mallinfo.uordblks = sbrked_mem - avail;
3448   current_mallinfo.fordblks = avail;
3449 #if HAVE_MMAP
3450   current_mallinfo.hblks = n_mmaps;
3451   current_mallinfo.hblkhd = mmapped_mem;
3452 #endif
3453   current_mallinfo.keepcost = chunksize(top);
3454 
3455 }
3456 
3457 #else /* ! DEFINE_MALLINFO */
3458 
3459 #if __STD_C
3460 extern void malloc_update_mallinfo(void);
3461 #else
3462 extern void malloc_update_mallinfo();
3463 #endif
3464 
3465 #endif /* ! DEFINE_MALLINFO */
3466 
3467 #ifdef DEFINE_MALLOC_STATS
3468 
3469 /*
3470 
3471   malloc_stats:
3472 
3473     Prints on stderr the amount of space obtain from the system (both
3474     via sbrk and mmap), the maximum amount (which may be more than
3475     current if malloc_trim and/or munmap got called), the maximum
3476     number of simultaneous mmap regions used, and the current number
3477     of bytes allocated via malloc (or realloc, etc) but not yet
3478     freed. (Note that this is the number of bytes allocated, not the
3479     number requested. It will be larger than the number requested
3480     because of alignment and bookkeeping overhead.)
3481 
3482 */
3483 
3484 #if __STD_C
malloc_stats(RONEARG)3485 void malloc_stats(RONEARG)
3486 #else
3487 void malloc_stats(RONEARG) RDECL
3488 #endif
3489 {
3490   unsigned long local_max_total_mem;
3491   int local_sbrked_mem;
3492   struct mallinfo local_mallinfo;
3493 #if HAVE_MMAP
3494   unsigned long local_mmapped_mem, local_max_n_mmaps;
3495 #endif
3496   FILE *fp;
3497 
3498   MALLOC_LOCK;
3499   malloc_update_mallinfo();
3500   local_max_total_mem = max_total_mem;
3501   local_sbrked_mem = sbrked_mem;
3502   local_mallinfo = current_mallinfo;
3503 #if HAVE_MMAP
3504   local_mmapped_mem = mmapped_mem;
3505   local_max_n_mmaps = max_n_mmaps;
3506 #endif
3507   MALLOC_UNLOCK;
3508 
3509 #ifdef INTERNAL_NEWLIB
3510   _REENT_SMALL_CHECK_INIT(reent_ptr);
3511   fp = _stderr_r(reent_ptr);
3512 #define fprintf fiprintf
3513 #else
3514   fp = stderr;
3515 #endif
3516 
3517   fprintf(fp, "max system bytes = %10u\n",
3518 	  (unsigned int)(local_max_total_mem));
3519 #if HAVE_MMAP
3520   fprintf(fp, "system bytes     = %10u\n",
3521 	  (unsigned int)(local_sbrked_mem + local_mmapped_mem));
3522   fprintf(fp, "in use bytes     = %10u\n",
3523 	  (unsigned int)(local_mallinfo.uordblks + local_mmapped_mem));
3524 #else
3525   fprintf(fp, "system bytes     = %10u\n",
3526 	  (unsigned int)local_sbrked_mem);
3527   fprintf(fp, "in use bytes     = %10u\n",
3528 	  (unsigned int)local_mallinfo.uordblks);
3529 #endif
3530 #if HAVE_MMAP
3531   fprintf(fp, "max mmap regions = %10u\n",
3532 	  (unsigned int)local_max_n_mmaps);
3533 #endif
3534 }
3535 
3536 #endif /* DEFINE_MALLOC_STATS */
3537 
3538 #ifdef DEFINE_MALLINFO
3539 
3540 /*
3541   mallinfo returns a copy of updated current mallinfo.
3542 */
3543 
3544 #if __STD_C
mALLINFo(RONEARG)3545 struct mallinfo mALLINFo(RONEARG)
3546 #else
3547 struct mallinfo mALLINFo(RONEARG) RDECL
3548 #endif
3549 {
3550   struct mallinfo ret;
3551 
3552   MALLOC_LOCK;
3553   malloc_update_mallinfo();
3554   ret = current_mallinfo;
3555   MALLOC_UNLOCK;
3556   return ret;
3557 }
3558 
3559 #endif /* DEFINE_MALLINFO */
3560 
3561 #ifdef DEFINE_MALLOPT
3562 
3563 /*
3564   mallopt:
3565 
3566     mallopt is the general SVID/XPG interface to tunable parameters.
3567     The format is to provide a (parameter-number, parameter-value) pair.
3568     mallopt then sets the corresponding parameter to the argument
3569     value if it can (i.e., so long as the value is meaningful),
3570     and returns 1 if successful else 0.
3571 
3572     See descriptions of tunable parameters above.
3573 
3574 */
3575 
3576 #if __STD_C
mALLOPt(RARG int param_number,int value)3577 int mALLOPt(RARG int param_number, int value)
3578 #else
3579 int mALLOPt(RARG param_number, value) RDECL int param_number; int value;
3580 #endif
3581 {
3582   MALLOC_LOCK;
3583   switch(param_number)
3584   {
3585     case M_TRIM_THRESHOLD:
3586       trim_threshold = value; MALLOC_UNLOCK; return 1;
3587     case M_TOP_PAD:
3588       top_pad = value; MALLOC_UNLOCK; return 1;
3589     case M_MMAP_THRESHOLD:
3590 #if HAVE_MMAP
3591       mmap_threshold = value;
3592 #endif
3593       MALLOC_UNLOCK;
3594       return 1;
3595     case M_MMAP_MAX:
3596 #if HAVE_MMAP
3597       n_mmaps_max = value; MALLOC_UNLOCK; return 1;
3598 #else
3599       MALLOC_UNLOCK; return value == 0;
3600 #endif
3601 
3602     default:
3603       MALLOC_UNLOCK;
3604       return 0;
3605   }
3606 }
3607 
3608 #endif /* DEFINE_MALLOPT */
3609 
3610 /*
3611 
3612 History:
3613 
3614     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
3615       * Fixed ordering problem with boundary-stamping
3616 
3617     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
3618       * Added pvalloc, as recommended by H.J. Liu
3619       * Added 64bit pointer support mainly from Wolfram Gloger
3620       * Added anonymously donated WIN32 sbrk emulation
3621       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
3622       * malloc_extend_top: fix mask error that caused wastage after
3623         foreign sbrks
3624       * Add linux mremap support code from HJ Liu
3625 
3626     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
3627       * Integrated most documentation with the code.
3628       * Add support for mmap, with help from
3629         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3630       * Use last_remainder in more cases.
3631       * Pack bins using idea from  colin@nyx10.cs.du.edu
3632       * Use ordered bins instead of best-fit threshhold
3633       * Eliminate block-local decls to simplify tracing and debugging.
3634       * Support another case of realloc via move into top
3635       * Fix error occuring when initial sbrk_base not word-aligned.
3636       * Rely on page size for units instead of SBRK_UNIT to
3637         avoid surprises about sbrk alignment conventions.
3638       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
3639         (raymond@es.ele.tue.nl) for the suggestion.
3640       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
3641       * More precautions for cases where other routines call sbrk,
3642         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3643       * Added macros etc., allowing use in linux libc from
3644         H.J. Lu (hjl@gnu.ai.mit.edu)
3645       * Inverted this history list
3646 
3647     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
3648       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
3649       * Removed all preallocation code since under current scheme
3650         the work required to undo bad preallocations exceeds
3651         the work saved in good cases for most test programs.
3652       * No longer use return list or unconsolidated bins since
3653         no scheme using them consistently outperforms those that don't
3654         given above changes.
3655       * Use best fit for very large chunks to prevent some worst-cases.
3656       * Added some support for debugging
3657 
3658     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
3659       * Removed footers when chunks are in use. Thanks to
3660         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
3661 
3662     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
3663       * Added malloc_trim, with help from Wolfram Gloger
3664         (wmglo@Dent.MED.Uni-Muenchen.DE).
3665 
3666     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
3667 
3668     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
3669       * realloc: try to expand in both directions
3670       * malloc: swap order of clean-bin strategy;
3671       * realloc: only conditionally expand backwards
3672       * Try not to scavenge used bins
3673       * Use bin counts as a guide to preallocation
3674       * Occasionally bin return list chunks in first scan
3675       * Add a few optimizations from colin@nyx10.cs.du.edu
3676 
3677     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
3678       * faster bin computation & slightly different binning
3679       * merged all consolidations to one part of malloc proper
3680          (eliminating old malloc_find_space & malloc_clean_bin)
3681       * Scan 2 returns chunks (not just 1)
3682       * Propagate failure in realloc if malloc returns 0
3683       * Add stuff to allow compilation on non-ANSI compilers
3684           from kpv@research.att.com
3685 
3686     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
3687       * removed potential for odd address access in prev_chunk
3688       * removed dependency on getpagesize.h
3689       * misc cosmetics and a bit more internal documentation
3690       * anticosmetics: mangled names in macros to evade debugger strangeness
3691       * tested on sparc, hp-700, dec-mips, rs6000
3692           with gcc & native cc (hp, dec only) allowing
3693           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
3694 
3695     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
3696       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
3697          structure of old version,  but most details differ.)
3698 
3699 */
3700 #endif
3701