1 /* Malloc implementation for multiple threads without lock contention.
2    Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4    Contributed by Wolfram Gloger <wg@malloc.de>
5    and Doug Lea <dl@cs.oswego.edu>, 2001.
6 
7    The GNU C Library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public License as
9    published by the Free Software Foundation; either version 2.1 of the
10    License, or (at your option) any later version.
11 
12    The GNU C Library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with the GNU C Library; see the file COPYING.LIB.  If not,
19    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20    Boston, MA 02111-1307, USA.  */
21 
22 /*
23   This is a version (aka ptmalloc2) of malloc/free/realloc written by
24   Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25 
26 * Version ptmalloc2-20011215
27   $Id: malloc.c,v 1.20 2004/11/04 17:31:04 wg Exp $
28   based on:
29   VERSION 2.7.1pre1 Sat May 12 07:41:21 2001  Doug Lea  (dl at gee)
30 
31    Note: There may be an updated version of this malloc obtainable at
32            http://www.malloc.de/malloc/ptmalloc2.tar.gz
33          Check before installing!
34 
35 * Quickstart
36 
37   In order to compile this implementation, a Makefile is provided with
38   the ptmalloc2 distribution, which has pre-defined targets for some
39   popular systems (e.g. "make posix" for Posix threads).  All that is
40   typically required with regard to compiler flags is the selection of
41   an appropriate malloc-machine.h include file via -I directives.
42   Many/most systems will additionally require USE_TSD_DATA_HACK to be
43   defined, so this is the default for "make posix".
44 
45 * Why use this malloc?
46 
47   This is not the fastest, most space-conserving, most portable, or
48   most tunable malloc ever written. However it is among the fastest
49   while also being among the most space-conserving, portable and tunable.
50   Consistent balance across these factors results in a good general-purpose
51   allocator for malloc-intensive programs.
52 
53   The main properties of the algorithms are:
54   * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55     with ties normally decided via FIFO (i.e. least recently used).
56   * For small (<= 64 bytes by default) requests, it is a caching
57     allocator, that maintains pools of quickly recycled chunks.
58   * In between, and for combinations of large and small requests, it does
59     the best it can trying to meet both goals at once.
60   * For very large requests (>= 128KB by default), it relies on system
61     memory mapping facilities, if supported.
62 
63   For a longer but slightly out of date high-level description, see
64      http://gee.cs.oswego.edu/dl/html/malloc.html
65 
66   You may already by default be using a C library containing a malloc
67   that is based on some version of this malloc (for example in
68   linux). You might still want to use the one in this file in order to
69   customize settings or to avoid overheads associated with library
70   versions.
71 
72 * Contents, described in more detail in "description of public routines" below.
73 
74   Standard (ANSI/SVID/...)  functions:
75     malloc(size_t n);
76     calloc(size_t n_elements, size_t element_size);
77     free(Void_t* p);
78     realloc(Void_t* p, size_t n);
79     memalign(size_t alignment, size_t n);
80     valloc(size_t n);
81     mallinfo()
82     mallopt(int parameter_number, int parameter_value)
83 
84   Additional functions:
85     independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
86     independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
87     pvalloc(size_t n);
88     cfree(Void_t* p);
89     malloc_trim(size_t pad);
90     malloc_usable_size(Void_t* p);
91     malloc_stats();
92 
93 * Vital statistics:
94 
95   Supported pointer representation:       4 or 8 bytes
96   Supported size_t  representation:       4 or 8 bytes
97        Note that size_t is allowed to be 4 bytes even if pointers are 8.
98        You can adjust this by defining INTERNAL_SIZE_T
99 
100   Alignment:                              2 * sizeof(size_t) (default)
101        (i.e., 8 byte alignment with 4byte size_t). This suffices for
102        nearly all current machines and C compilers. However, you can
103        define MALLOC_ALIGNMENT to be wider than this if necessary.
104 
105   Minimum overhead per allocated chunk:   4 or 8 bytes
106        Each malloced chunk has a hidden word of overhead holding size
107        and status information.
108 
109   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
110                           8-byte ptrs:  24/32 bytes (including 4/8 overhead)
111 
112        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
113        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
114        needed; 4 (8) for a trailing size field and 8 (16) bytes for
115        free list pointers. Thus, the minimum allocatable size is
116        16/24/32 bytes.
117 
118        Even a request for zero bytes (i.e., malloc(0)) returns a
119        pointer to something of the minimum allocatable size.
120 
121        The maximum overhead wastage (i.e., the number of extra bytes
122        allocated beyond those requested in malloc) is less than or equal
123        to the minimum size, except for requests >= mmap_threshold that
124        are serviced via mmap(), where the worst case wastage is 2 *
125        sizeof(size_t) bytes plus the remainder from a system page (the
126        minimal mmap unit); typically 4096 or 8192 bytes.
127 
128   Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
129                            8-byte size_t: 2^64 minus about two pages
130 
131        It is assumed that (possibly signed) size_t values suffice to
132        represent chunk sizes. `Possibly signed' is due to the fact
133        that `size_t' may be defined on a system as either a signed or
134        an unsigned type. The ISO C standard says that it must be
135        unsigned, but a few systems are known not to adhere to this.
136        Additionally, even when size_t is unsigned, sbrk (which is by
137        default used to obtain memory from system) accepts signed
138        arguments, and may not be able to handle size_t-wide arguments
139        with negative sign bit.  Generally, values that would
140        appear as negative after accounting for overhead and alignment
141        are supported only via mmap(), which does not have this
142        limitation.
143 
144        Requests for sizes outside the allowed range will perform an optional
145        failure action and then return null. (Requests may also
146        fail because a system is out of memory.)
147 
148   Thread-safety: thread-safe unless NO_THREADS is defined
149 
150   Compliance: I believe it is compliant with the 1997 Single Unix Specification
151        (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
152        others as well.
153 
154 * Synopsis of compile-time options:
155 
156     People have reported using previous versions of this malloc on all
157     versions of Unix, sometimes by tweaking some of the defines
158     below. It has been tested most extensively on Solaris and
159     Linux. It is also reported to work on WIN32 platforms.
160     People also report using it in stand-alone embedded systems.
161 
162     The implementation is in straight, hand-tuned ANSI C.  It is not
163     at all modular. (Sorry!)  It uses a lot of macros.  To be at all
164     usable, this code should be compiled using an optimizing compiler
165     (for example gcc -O3) that can simplify expressions and control
166     paths. (FAQ: some macros import variables as arguments rather than
167     declare locals because people reported that some debuggers
168     otherwise get confused.)
169 
170     OPTION                     DEFAULT VALUE
171 
172     Compilation Environment options:
173 
174     __STD_C                    derived from C compiler defines
175     WIN32                      NOT defined
176     HAVE_MEMCPY                defined
177     USE_MEMCPY                 1 if HAVE_MEMCPY is defined
178     HAVE_MMAP                  defined as 1
179     MMAP_CLEARS                1
180     HAVE_MREMAP                0 unless linux defined
181     USE_ARENAS                 the same as HAVE_MMAP
182     USE_STARTER                1
183     malloc_getpagesize         derived from system #includes, or 4096 if not
184     HAVE_USR_INCLUDE_MALLOC_H  NOT defined
185     LACKS_UNISTD_H             NOT defined unless WIN32
186     LACKS_SYS_PARAM_H          NOT defined unless WIN32
187     LACKS_SYS_MMAN_H           NOT defined unless WIN32
188 
189     Changing default word sizes:
190 
191     INTERNAL_SIZE_T            size_t
192     MALLOC_ALIGNMENT           2 * sizeof(INTERNAL_SIZE_T)
193 
194     Configuration and functionality options:
195 
196     USE_DL_PREFIX              NOT defined
197     USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198     USE_MALLOC_LOCK            NOT defined
199     MALLOC_DEBUG               NOT defined
200     REALLOC_ZERO_BYTES_FREES   1
201     MALLOC_FAILURE_ACTION      errno = ENOMEM, if __STD_C defined, else no-op
202     TRIM_FASTBINS              0
203     FIRST_SORTED_BIN_SIZE      512
204 
205     Options for customizing MORECORE:
206 
207     MORECORE                   sbrk
208     MORECORE_FAILURE           -1
209     MORECORE_CONTIGUOUS        1
210     MORECORE_CANNOT_TRIM       NOT defined
211     MORECORE_CLEARS            1
212     MMAP_AS_MORECORE_SIZE      (1024 * 1024)
213 
214     Tuning options that are also dynamically changeable via mallopt:
215 
216     DEFAULT_MXFAST             64
217     DEFAULT_TRIM_THRESHOLD     128 * 1024
218     DEFAULT_TOP_PAD            0
219     DEFAULT_MMAP_THRESHOLD     128 * 1024
220     DEFAULT_MMAP_MAX           65536
221 
222     There are several other #defined constants and macros that you
223     probably don't want to touch unless you are extending or adapting malloc.  */
224 
225 /*
226   __STD_C should be nonzero if using ANSI-standard C compiler, a C++
227   compiler, or a C compiler sufficiently close to ANSI to get away
228   with it.
229 */
230 
231 #ifndef __STD_C
232 #if defined(__STDC__) || defined(__cplusplus)
233 #define __STD_C     1
234 #else
235 #define __STD_C     0
236 #endif
237 #endif /*__STD_C*/
238 
239 
240 /*
241   Void_t* is the pointer type that malloc should say it returns
242 */
243 
244 #ifndef Void_t
245 #if (__STD_C || defined(WIN32))
246 #define Void_t      void
247 #else
248 #define Void_t      char
249 #endif
250 #endif /*Void_t*/
251 
252 #if __STD_C
253 #include <stddef.h>   /* for size_t */
254 #include <stdlib.h>   /* for getenv(), abort() */
255 #else
256 #include <sys/types.h>
257 #endif
258 
259 #include <malloc-machine.h>
260 
261 #ifdef __cplusplus
262 extern "C" {
263 #endif
264 
265 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
266 
267 /* #define  LACKS_UNISTD_H */
268 
269 #ifndef LACKS_UNISTD_H
270 #include <unistd.h>
271 #endif
272 
273 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
274 
275 /* #define  LACKS_SYS_PARAM_H */
276 
277 
278 #include <stdio.h>    /* needed for malloc_stats */
279 #include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
280 
281 
282 /*
283   Debugging:
284 
285   Because freed chunks may be overwritten with bookkeeping fields, this
286   malloc will often die when freed memory is overwritten by user
287   programs.  This can be very effective (albeit in an annoying way)
288   in helping track down dangling pointers.
289 
290   If you compile with -DMALLOC_DEBUG, a number of assertion checks are
291   enabled that will catch more memory errors. You probably won't be
292   able to make much sense of the actual assertion errors, but they
293   should help you locate incorrectly overwritten memory.  The checking
294   is fairly extensive, and will slow down execution
295   noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
296   will attempt to check every non-mmapped allocated and free chunk in
297   the course of computing the summaries. (By nature, mmapped regions
298   cannot be checked very much automatically.)
299 
300   Setting MALLOC_DEBUG may also be helpful if you are trying to modify
301   this code. The assertions in the check routines spell out in more
302   detail the assumptions and invariants underlying the algorithms.
303 
304   Setting MALLOC_DEBUG does NOT provide an automated mechanism for
305   checking that all accesses to malloced memory stay within their
306   bounds. However, there are several add-ons and adaptations of this
307   or other mallocs available that do this.
308 */
309 
310 #if MALLOC_DEBUG
311 #include <assert.h>
312 #else
313 #undef assert
314 #define assert(x) ((void)0)
315 #endif
316 
317 
318 /*
319   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
320   of chunk sizes.
321 
322   The default version is the same as size_t.
323 
324   While not strictly necessary, it is best to define this as an
325   unsigned type, even if size_t is a signed type. This may avoid some
326   artificial size limitations on some systems.
327 
328   On a 64-bit machine, you may be able to reduce malloc overhead by
329   defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
330   expense of not being able to handle more than 2^32 of malloced
331   space. If this limitation is acceptable, you are encouraged to set
332   this unless you are on a platform requiring 16byte alignments. In
333   this case the alignment requirements turn out to negate any
334   potential advantages of decreasing size_t word size.
335 
336   Implementors: Beware of the possible combinations of:
337      - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
338        and might be the same width as int or as long
339      - size_t might have different width and signedness than INTERNAL_SIZE_T
340      - int and long might be 32 or 64 bits, and might be the same width
341   To deal with this, most comparisons and difference computations
342   among INTERNAL_SIZE_Ts should cast them to unsigned long, being
343   aware of the fact that casting an unsigned int to a wider long does
344   not sign-extend. (This also makes checking for negative numbers
345   awkward.) Some of these casts result in harmless compiler warnings
346   on some systems.
347 */
348 
349 #ifndef INTERNAL_SIZE_T
350 #define INTERNAL_SIZE_T size_t
351 #endif
352 
353 /* The corresponding word size */
354 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
355 
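
/*
  Illustrative sketch, not part of the original sources: the style of
  comparison recommended above, with both INTERNAL_SIZE_T operands cast
  to unsigned long.  The helper name is invented for this example.

    static int size_at_least(INTERNAL_SIZE_T have, INTERNAL_SIZE_T need)
    {
      return (unsigned long)(have) >= (unsigned long)(need);
    }
*/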
356 
357 /*
358   MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
359   It must be a power of two at least 2 * SIZE_SZ, even on machines
360   for which smaller alignments would suffice. It may be defined as
361   larger than this though. Note however that code and data structures
362   are optimized for the case of 8-byte alignment.
363 */
364 
365 
366 #ifndef MALLOC_ALIGNMENT
367 #define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
368 #endif
369 
370 /* The corresponding bit mask value */
371 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
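
/*
  Illustrative sketch, not part of the original sources: how a mask like
  MALLOC_ALIGN_MASK is typically used to round a byte count up to the
  next multiple of MALLOC_ALIGNMENT.  The macro name is invented for
  this example; the internal request-to-chunk-size conversion used later
  in this file additionally accounts for overhead and the minimum size.

    #define EXAMPLE_ALIGN_UP(n) \
      (((n) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

  With the default 8-byte alignment, EXAMPLE_ALIGN_UP(13) is 16.
*/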
372 
373 
374 
375 /*
376   REALLOC_ZERO_BYTES_FREES should be set if a call to
377   realloc with zero bytes should be the same as a call to free.
378   This is required by the C standard. Otherwise, because this malloc
379   returns a unique pointer for malloc(0), realloc(p, 0) does too.
380 */
381 
382 #ifndef REALLOC_ZERO_BYTES_FREES
383 #define REALLOC_ZERO_BYTES_FREES 1
384 #endif
385 
386 /*
387   TRIM_FASTBINS controls whether free() of a very small chunk can
388   immediately lead to trimming. Setting to true (1) can reduce memory
389   footprint, but will almost always slow down programs that use a lot
390   of small chunks.
391 
392   Define this only if you are willing to give up some speed to more
393   aggressively reduce system-level memory footprint when releasing
394   memory in programs that use many small chunks.  You can get
395   essentially the same effect by setting MXFAST to 0, but this can
396   lead to even greater slowdowns in programs using many small chunks.
397   TRIM_FASTBINS is an in-between compile-time option that prevents
398   only those chunks bordering topmost memory from being placed in
399   fastbins.
400 */
401 
402 #ifndef TRIM_FASTBINS
403 #define TRIM_FASTBINS  0
404 #endif
405 
406 
407 /*
408   USE_DL_PREFIX will prefix all public routines with the string 'dl'.
409   This is necessary when you only want to use this malloc in one part
410   of a program, using your regular system malloc elsewhere.
411 */
412 
413 /* #define USE_DL_PREFIX */
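
/*
  Illustrative sketch, not part of the original sources: when compiled
  with -DUSE_DL_PREFIX, the public entry points become dlmalloc, dlfree,
  and so on (see the mappings below), so they can coexist with the
  regular system allocator:

    void* p = dlmalloc(100);   // served by this allocator
    void* q = malloc(100);     // served by the system's own malloc
    dlfree(p);
    free(q);
*/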
414 
415 
416 /*
417    Two-phase name translation.
418    All of the actual routines are given mangled names.
419    When wrappers are used, they become the public callable versions.
420    When USE_DL_PREFIX is used, the callable names are prefixed.
421 */
422 
423 #ifdef USE_DL_PREFIX
424 #define public_cALLOc    dlcalloc
425 #define public_fREe      dlfree
426 #define public_cFREe     dlcfree
427 #define public_mALLOc    dlmalloc
428 #define public_mEMALIGn  dlmemalign
429 #define public_rEALLOc   dlrealloc
430 #define public_vALLOc    dlvalloc
431 #define public_pVALLOc   dlpvalloc
432 #define public_mALLINFo  dlmallinfo
433 #define public_mALLOPt   dlmallopt
434 #define public_mTRIm     dlmalloc_trim
435 #define public_mSTATs    dlmalloc_stats
436 #define public_mUSABLe   dlmalloc_usable_size
437 #define public_iCALLOc   dlindependent_calloc
438 #define public_iCOMALLOc dlindependent_comalloc
439 #define public_gET_STATe dlget_state
440 #define public_sET_STATe dlset_state
441 #else /* USE_DL_PREFIX */
442 #ifdef _LIBC
443 
444 /* Special defines for the GNU C library.  */
445 #define public_cALLOc    __libc_calloc
446 #define public_fREe      __libc_free
447 #define public_cFREe     __libc_cfree
448 #define public_mALLOc    __libc_malloc
449 #define public_mEMALIGn  __libc_memalign
450 #define public_rEALLOc   __libc_realloc
451 #define public_vALLOc    __libc_valloc
452 #define public_pVALLOc   __libc_pvalloc
453 #define public_mALLINFo  __libc_mallinfo
454 #define public_mALLOPt   __libc_mallopt
455 #define public_mTRIm     __malloc_trim
456 #define public_mSTATs    __malloc_stats
457 #define public_mUSABLe   __malloc_usable_size
458 #define public_iCALLOc   __libc_independent_calloc
459 #define public_iCOMALLOc __libc_independent_comalloc
460 #define public_gET_STATe __malloc_get_state
461 #define public_sET_STATe __malloc_set_state
462 #define malloc_getpagesize __getpagesize()
463 #define open             __open
464 #define mmap             __mmap
465 #define munmap           __munmap
466 #define mremap           __mremap
467 #define mprotect         __mprotect
468 #define MORECORE         (*__morecore)
469 #define MORECORE_FAILURE 0
470 
471 Void_t * __default_morecore (ptrdiff_t);
472 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
473 
474 #else /* !_LIBC */
475 #define public_cALLOc    calloc
476 #define public_fREe      free
477 #define public_cFREe     cfree
478 #define public_mALLOc    malloc
479 #define public_mEMALIGn  memalign
480 #define public_rEALLOc   realloc
481 #define public_vALLOc    valloc
482 #define public_pVALLOc   pvalloc
483 #define public_mALLINFo  mallinfo
484 #define public_mALLOPt   mallopt
485 #define public_mTRIm     malloc_trim
486 #define public_mSTATs    malloc_stats
487 #define public_mUSABLe   malloc_usable_size
488 #define public_iCALLOc   independent_calloc
489 #define public_iCOMALLOc independent_comalloc
490 #define public_gET_STATe malloc_get_state
491 #define public_sET_STATe malloc_set_state
492 #endif /* _LIBC */
493 #endif /* USE_DL_PREFIX */
494 
495 
496 #if !defined _LIBC && (!defined __GNUC__ || __GNUC__<3)
497 #define __builtin_expect(expr, val) (expr)
498 #endif
499 
500 /*
501   HAVE_MEMCPY should be defined if you are not otherwise using
502   ANSI STD C, but still have memcpy and memset in your C library
503   and want to use them in calloc and realloc. Otherwise simple
504   macro versions are defined below.
505 
506   USE_MEMCPY should be defined as 1 if you actually want to
507   have memset and memcpy called. People report that the macro
508   versions are faster than libc versions on some systems.
509 
510   Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
511   (of <= 36 bytes) are manually unrolled in realloc and calloc.
512 */
513 
514 #define HAVE_MEMCPY
515 
516 #ifndef USE_MEMCPY
517 #ifdef HAVE_MEMCPY
518 #define USE_MEMCPY 1
519 #else
520 #define USE_MEMCPY 0
521 #endif
522 #endif
523 
524 
525 #if (__STD_C || defined(HAVE_MEMCPY))
526 
527 #ifdef _LIBC
528 # include <string.h>
529 #else
530 #ifdef WIN32
531 /* On Win32 memset and memcpy are already declared in windows.h */
532 #else
533 #if __STD_C
534 void* memset(void*, int, size_t);
535 void* memcpy(void*, const void*, size_t);
536 #else
537 Void_t* memset();
538 Void_t* memcpy();
539 #endif
540 #endif
541 #endif
542 #endif
543 
544 /*
545   MALLOC_FAILURE_ACTION is the action to take before "return 0" when
546   malloc is unable to return memory, either because memory is
547   exhausted or because of illegal arguments.
548 
549   By default, sets errno if running on STD_C platform, else does nothing.
550 */
551 
552 #ifndef MALLOC_FAILURE_ACTION
553 #if __STD_C
554 #define MALLOC_FAILURE_ACTION \
555    errno = ENOMEM;
556 
557 #else
558 #define MALLOC_FAILURE_ACTION
559 #endif
560 #endif
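
/*
  Illustrative sketch, not part of the original sources: a build can
  supply its own failure action, for example to log failures in
  addition to setting errno:

    #define MALLOC_FAILURE_ACTION \
      { errno = ENOMEM; fputs("malloc: out of memory\n", stderr); }

  Any statement (or an empty definition) works; it is executed just
  before the failing routine returns 0.
*/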
561 
562 /*
563   MORECORE-related declarations. By default, rely on sbrk
564 */
565 
566 
567 #ifdef LACKS_UNISTD_H
568 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
569 #if __STD_C
570 extern Void_t*     sbrk(ptrdiff_t);
571 #else
572 extern Void_t*     sbrk();
573 #endif
574 #endif
575 #endif
576 
577 /*
578   MORECORE is the name of the routine to call to obtain more memory
579   from the system.  See below for general guidance on writing
580   alternative MORECORE functions, as well as a version for WIN32 and a
581   sample version for pre-OSX macos.
582 */
583 
584 #ifndef MORECORE
585 #define MORECORE sbrk
586 #endif
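
/*
  Illustrative sketch, not part of the original sources: the expected
  shape of a replacement MORECORE function.  It takes a ptrdiff_t
  increment and must return MORECORE_FAILURE on failure.  This trivial
  version just forwards to sbrk to show the calling convention; see the
  guidance further below before writing a real one.

    void* example_morecore(ptrdiff_t increment)
    {
      void* result = sbrk(increment);
      if (result == (void*) -1)
        return (void*) MORECORE_FAILURE;
      return result;
    }

  Such a build would compile with -DMORECORE=example_morecore.
*/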
587 
588 /*
589   MORECORE_FAILURE is the value returned upon failure of MORECORE
590   as well as mmap. Since it cannot be an otherwise valid memory address,
591   and must reflect values of standard sys calls, you probably ought not
592   try to redefine it.
593 */
594 
595 #ifndef MORECORE_FAILURE
596 #define MORECORE_FAILURE (-1)
597 #endif
598 
599 /*
600   If MORECORE_CONTIGUOUS is true, take advantage of the fact that
601   consecutive calls to MORECORE with positive arguments always return
602   contiguous increasing addresses.  This is true of unix sbrk.  Even
603   if not defined, when regions happen to be contiguous, malloc will
604   permit allocations spanning regions obtained from different
605   calls. But defining this when applicable enables some stronger
606   consistency checks and space efficiencies.
607 */
608 
609 #ifndef MORECORE_CONTIGUOUS
610 #define MORECORE_CONTIGUOUS 1
611 #endif
612 
613 /*
614   Define MORECORE_CANNOT_TRIM if your version of MORECORE
615   cannot release space back to the system when given negative
616   arguments. This is generally necessary only if you are using
617   a hand-crafted MORECORE function that cannot handle negative arguments.
618 */
619 
620 /* #define MORECORE_CANNOT_TRIM */
621 
622 /*  MORECORE_CLEARS           (default 1)
623      The degree to which the routine mapped to MORECORE zeroes out
624      memory: never (0), only for newly allocated space (1) or always
625      (2).  The distinction between (1) and (2) is necessary because on
626      some systems, if the application first decrements and then
627      increments the break value, the contents of the reallocated space
628      are unspecified.
629 */
630 
631 #ifndef MORECORE_CLEARS
632 #define MORECORE_CLEARS 1
633 #endif
634 
635 
636 /*
637   Define HAVE_MMAP as true to optionally make malloc() use mmap() to
638   allocate very large blocks.  These will be returned to the
639   operating system immediately after a free(). Also, if mmap
640   is available, it is used as a backup strategy in cases where
641   MORECORE fails to provide space from system.
642 
643   This malloc is best tuned to work with mmap for large requests.
644   If you do not have mmap, operations involving very large chunks (1MB
645   or so) may be slower than you'd like.
646 */
647 
648 #ifndef HAVE_MMAP
649 #define HAVE_MMAP 1
650 
651 /*
652    Standard unix mmap using /dev/zero clears memory so calloc doesn't
653    need to.
654 */
655 
656 #ifndef MMAP_CLEARS
657 #define MMAP_CLEARS 1
658 #endif
659 
660 #else /* no mmap */
661 #ifndef MMAP_CLEARS
662 #define MMAP_CLEARS 0
663 #endif
664 #endif
665 
666 
667 /*
668    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
669    sbrk fails, and mmap is used as a backup (which is done only if
670    HAVE_MMAP).  The value must be a multiple of page size.  This
671    backup strategy generally applies only when systems have "holes" in
672    address space, so sbrk cannot perform contiguous expansion, but
673    there is still space available on system.  On systems for which
674    this is known to be useful (i.e. most linux kernels), this occurs
675    only when programs allocate huge amounts of memory.  Between this,
676    and the fact that mmap regions tend to be limited, the size should
677    be large, to avoid too many mmap calls and thus avoid running out
678    of kernel resources.
679 */
680 
681 #ifndef MMAP_AS_MORECORE_SIZE
682 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
683 #endif
684 
685 /*
686   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
687   large blocks.  This is currently only possible on Linux with
688   kernel versions newer than 1.3.77.
689 */
690 
691 #ifndef HAVE_MREMAP
692 #ifdef linux
693 #define HAVE_MREMAP 1
694 #else
695 #define HAVE_MREMAP 0
696 #endif
697 
698 #endif /* HAVE_MREMAP */
699 
700 /* Define USE_ARENAS to enable support for multiple `arenas'.  These
701    are allocated using mmap(), are necessary for threads and
702    occasionally useful to overcome address space limitations affecting
703    sbrk(). */
704 
705 #ifndef USE_ARENAS
706 #define USE_ARENAS HAVE_MMAP
707 #endif
708 
709 /* USE_STARTER determines if and when the special "starter" hook
710    functions are used: not at all (0), during ptmalloc_init (first bit
711    set), or from the beginning until an explicit call to ptmalloc_init
712    (second bit set).  This is necessary if thread-related
713    initialization functions (e.g.  pthread_key_create) require
714    malloc() calls (set USE_STARTER=1), or if those functions initially
715    cannot be used at all (set USE_STARTER=2 and perform an explicit
716    ptmalloc_init() when the thread library is ready, typically at the
717    start of main()). */
718 
719 #ifndef USE_STARTER
720 # ifndef _LIBC
721 #  define USE_STARTER 1
722 # else
723 #  if USE___THREAD || (defined USE_TLS && !defined SHARED)
724     /* These routines are never needed in this configuration.  */
725 #   define USE_STARTER 0
726 #  else
727 #   define USE_STARTER (USE_TLS ? 4 : 1)
728 #  endif
729 # endif
730 #endif
731 
732 
733 /*
734   The system page size. To the extent possible, this malloc manages
735   memory from the system in page-size units.  Note that this value is
736   cached during initialization into a field of malloc_state. So even
737   if malloc_getpagesize is a function, it is only called once.
738 
739   The following mechanics for getpagesize were adapted from bsd/gnu
740   getpagesize.h. If none of the system-probes here apply, a value of
741   4096 is used, which should be OK: If they don't apply, then using
742   the actual value probably doesn't impact performance.
743 */
744 
745 
746 #ifndef malloc_getpagesize
747 
748 #ifndef LACKS_UNISTD_H
749 #  include <unistd.h>
750 #endif
751 
752 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
753 #    ifndef _SC_PAGE_SIZE
754 #      define _SC_PAGE_SIZE _SC_PAGESIZE
755 #    endif
756 #  endif
757 
758 #  ifdef _SC_PAGE_SIZE
759 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
760 #  else
761 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
762        extern size_t getpagesize();
763 #      define malloc_getpagesize getpagesize()
764 #    else
765 #      ifdef WIN32 /* use supplied emulation of getpagesize */
766 #        define malloc_getpagesize getpagesize()
767 #      else
768 #        ifndef LACKS_SYS_PARAM_H
769 #          include <sys/param.h>
770 #        endif
771 #        ifdef EXEC_PAGESIZE
772 #          define malloc_getpagesize EXEC_PAGESIZE
773 #        else
774 #          ifdef NBPG
775 #            ifndef CLSIZE
776 #              define malloc_getpagesize NBPG
777 #            else
778 #              define malloc_getpagesize (NBPG * CLSIZE)
779 #            endif
780 #          else
781 #            ifdef NBPC
782 #              define malloc_getpagesize NBPC
783 #            else
784 #              ifdef PAGESIZE
785 #                define malloc_getpagesize PAGESIZE
786 #              else /* just guess */
787 #                define malloc_getpagesize (4096)
788 #              endif
789 #            endif
790 #          endif
791 #        endif
792 #      endif
793 #    endif
794 #  endif
795 #endif
796 
797 /*
798   This version of malloc supports the standard SVID/XPG mallinfo
799   routine that returns a struct containing usage properties and
800   statistics. It should work on any SVID/XPG compliant system that has
801   a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
802   install such a thing yourself, cut out the preliminary declarations
803   as described above and below and save them in a malloc.h file. But
804   there's no compelling reason to bother to do this.)
805 
806   The main declaration needed is the mallinfo struct that is returned
807   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
808   bunch of fields that are not even meaningful in this version of
809   malloc.  These fields are instead filled by mallinfo() with
810   other numbers that might be of interest.
811 
812   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
813   /usr/include/malloc.h file that includes a declaration of struct
814   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
815   version is declared below.  These must be precisely the same for
816   mallinfo() to work.  The original SVID version of this struct,
817   defined on most systems with mallinfo, declares all fields as
818   ints. But some others define them as unsigned long. If your system
819   defines the fields using a type of different width than listed here,
820   you must #include your system version and #define
821   HAVE_USR_INCLUDE_MALLOC_H.
822 */
823 
824 /* #define HAVE_USR_INCLUDE_MALLOC_H */
825 
826 #ifdef HAVE_USR_INCLUDE_MALLOC_H
827 #include "/usr/include/malloc.h"
828 #endif
829 
830 
831 /* ---------- description of public routines ------------ */
832 
833 /*
834   malloc(size_t n)
835   Returns a pointer to a newly allocated chunk of at least n bytes, or null
836   if no space is available. Additionally, on failure, errno is
837   set to ENOMEM on ANSI C systems.
838 
839   If n is zero, malloc returns a minimum-sized chunk. (The minimum
840   size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
841   systems.)  On most systems, size_t is an unsigned type, so calls
842   with negative arguments are interpreted as requests for huge amounts
843   of space, which will often fail. The maximum supported value of n
844   differs across systems, but is in all cases less than the maximum
845   representable value of a size_t.
846 */
847 #if __STD_C
848 Void_t*  public_mALLOc(size_t);
849 #else
850 Void_t*  public_mALLOc();
851 #endif
852 #ifdef libc_hidden_proto
853 libc_hidden_proto (public_mALLOc)
854 #endif
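
/*
  Illustrative usage sketch, not part of the original sources:

    static void example_malloc_use(void)
    {
      char* buf = (char*) malloc(100);
      if (buf == 0) {
        // On ANSI C systems errno has been set to ENOMEM here.
        perror("malloc");
        return;
      }
      // ... use buf[0] through buf[99] ...
      free(buf);
    }
*/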
855 
856 /*
857   free(Void_t* p)
858   Releases the chunk of memory pointed to by p, that had been previously
859   allocated using malloc or a related routine such as realloc.
860   It has no effect if p is null. It can have arbitrary (i.e., bad!)
861   effects if p has already been freed.
862 
863   Unless disabled (using mallopt), freeing very large spaces will,
864   when possible, automatically trigger operations that give
865   back unused memory to the system, thus reducing program footprint.
866 */
867 #if __STD_C
868 void     public_fREe(Void_t*);
869 #else
870 void     public_fREe();
871 #endif
872 #ifdef libc_hidden_proto
873 libc_hidden_proto (public_fREe)
874 #endif
875 
876 /*
877   calloc(size_t n_elements, size_t element_size);
878   Returns a pointer to n_elements * element_size bytes, with all locations
879   set to zero.
880 */
881 #if __STD_C
882 Void_t*  public_cALLOc(size_t, size_t);
883 #else
884 Void_t*  public_cALLOc();
885 #endif
886 
887 /*
888   realloc(Void_t* p, size_t n)
889   Returns a pointer to a chunk of size n that contains the same data
890   as does chunk p up to the minimum of (n, p's size) bytes, or null
891   if no space is available.
892 
893   The returned pointer may or may not be the same as p. The algorithm
894   prefers extending p when possible, otherwise it employs the
895   equivalent of a malloc-copy-free sequence.
896 
897   If p is null, realloc is equivalent to malloc.
898 
899   If space is not available, realloc returns null, errno is set (if on
900   ANSI) and p is NOT freed.
901 
902   If n is for fewer bytes than already held by p, the newly unused
903   space is lopped off and freed if possible.  Unless the #define
904   REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
905   zero (re)allocates a minimum-sized chunk.
906 
907   Large chunks that were internally obtained via mmap will always
908   be reallocated using malloc-copy-free sequences unless
909   the system supports MREMAP (currently only linux).
910 
911   The old unix realloc convention of allowing the last-free'd chunk
912   to be used as an argument to realloc is not supported.
913 */
914 #if __STD_C
915 Void_t*  public_rEALLOc(Void_t*, size_t);
916 #else
917 Void_t*  public_rEALLOc();
918 #endif
919 #ifdef libc_hidden_proto
920 libc_hidden_proto (public_rEALLOc)
921 #endif
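
/*
  Illustrative sketch, not part of the original sources: because a
  failed realloc leaves the original block valid (it is NOT freed),
  callers should keep the old pointer until success is known:

    static void* example_grow(void* p, size_t new_size)
    {
      void* q = realloc(p, new_size);
      if (q == 0) {
        // p is still valid and still owned by the caller here;
        // free it or keep using it, as appropriate.
        return p;
      }
      return q;
    }
*/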
922 
923 /*
924   memalign(size_t alignment, size_t n);
925   Returns a pointer to a newly allocated chunk of n bytes, aligned
926   in accord with the alignment argument.
927 
928   The alignment argument should be a power of two. If the argument is
929   not a power of two, the nearest greater power of two is used.
930   8-byte alignment is guaranteed by normal malloc calls, so don't
931   bother calling memalign with an argument of 8 or less.
932 
933   Overreliance on memalign is a sure way to fragment space.
934 */
935 #if __STD_C
936 Void_t*  public_mEMALIGn(size_t, size_t);
937 #else
938 Void_t*  public_mEMALIGn();
939 #endif
940 #ifdef libc_hidden_proto
941 libc_hidden_proto (public_mEMALIGn)
942 #endif
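
/*
  Illustrative usage sketch, not part of the original sources: a
  64-byte-aligned buffer, e.g. for cache-line-sized records:

    void* p = memalign(64, 1024);
    if (p != 0) {
      // ((size_t) p % 64) == 0 holds here.
      free(p);
    }
*/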
943 
944 /*
945   valloc(size_t n);
946   Equivalent to memalign(pagesize, n), where pagesize is the page
947   size of the system. If the pagesize is unknown, 4096 is used.
948 */
949 #if __STD_C
950 Void_t*  public_vALLOc(size_t);
951 #else
952 Void_t*  public_vALLOc();
953 #endif
954 
955 
956 
957 /*
958   mallopt(int parameter_number, int parameter_value)
959   Sets tunable parameters. The format is to provide a
960   (parameter-number, parameter-value) pair.  mallopt then sets the
961   corresponding parameter to the argument value if it can (i.e., so
962   long as the value is meaningful), and returns 1 if successful else
963   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
964   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
965   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
966   so setting them has no effect. But this malloc also supports four
967   other options in mallopt. See below for details.  Briefly, supported
968   parameters are as follows (listed defaults are for "typical"
969   configurations).
970 
971   Symbol            param #   default    allowed param values
972   M_MXFAST          1         64         0-80  (0 disables fastbins)
973   M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
974   M_TOP_PAD        -2         0          any
975   M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
976   M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
977 */
978 #if __STD_C
979 int      public_mALLOPt(int, int);
980 #else
981 int      public_mALLOPt();
982 #endif
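
/*
  Illustrative usage sketch, not part of the original sources, using the
  parameter symbols defined later in this file:

    mallopt(M_MXFAST, 0);                   // disable fastbins entirely
    mallopt(M_TRIM_THRESHOLD, 64 * 1024);   // trim more eagerly
    mallopt(M_MMAP_THRESHOLD, 256 * 1024);  // mmap only larger requests
    mallopt(M_MMAP_MAX, 0);                 // never service requests via mmap

  Each call returns 1 if the value was accepted, else 0.
*/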
983 
984 
985 /*
986   mallinfo()
987   Returns (by copy) a struct containing various summary statistics:
988 
989   arena:     current total non-mmapped bytes allocated from system
990   ordblks:   the number of free chunks
991   smblks:    the number of fastbin blocks (i.e., small chunks that
992                have been freed but not yet reused or consolidated)
993   hblks:     current number of mmapped regions
994   hblkhd:    total bytes held in mmapped regions
995   usmblks:   the maximum total allocated space. This will be greater
996                 than current total if trimming has occurred.
997   fsmblks:   total bytes held in fastbin blocks
998   uordblks:  current total allocated space (normal or mmapped)
999   fordblks:  total free space
1000   keepcost:  the maximum number of bytes that could ideally be released
1001                back to system via malloc_trim. ("ideally" means that
1002                it ignores page restrictions etc.)
1003 
1004   Because these fields are ints, but internal bookkeeping may
1005   be kept as longs, the reported values may wrap around zero and
1006   thus be inaccurate.
1007 */
1008 #if __STD_C
1009 struct mallinfo public_mALLINFo(void);
1010 #else
1011 struct mallinfo public_mALLINFo();
1012 #endif
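
/*
  Illustrative usage sketch, not part of the original sources:

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "allocated: %d bytes, free: %d bytes, mmapped: %d bytes\n",
            mi.uordblks, mi.fordblks, mi.hblkhd);

  Field names follow the SVID/XPG struct described above; all values are
  reported as ints.
*/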
1013 
1014 /*
1015   independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1016 
1017   independent_calloc is similar to calloc, but instead of returning a
1018   single cleared space, it returns an array of pointers to n_elements
1019   independent elements that can hold contents of size elem_size, each
1020   of which starts out cleared, and can be independently freed,
1021   realloc'ed etc. The elements are guaranteed to be adjacently
1022   allocated (this is not guaranteed to occur with multiple callocs or
1023   mallocs), which may also improve cache locality in some
1024   applications.
1025 
1026   The "chunks" argument is optional (i.e., may be null, which is
1027   probably the most typical usage). If it is null, the returned array
1028   is itself dynamically allocated and should also be freed when it is
1029   no longer needed. Otherwise, the chunks array must be of at least
1030   n_elements in length. It is filled in with the pointers to the
1031   chunks.
1032 
1033   In either case, independent_calloc returns this pointer array, or
1034   null if the allocation failed.  If n_elements is zero and "chunks"
1035   is null, it returns a chunk representing an array with zero elements
1036   (which should be freed if not wanted).
1037 
1038   Each element must be individually freed when it is no longer
1039   needed. If you'd like to instead be able to free all at once, you
1040   should instead use regular calloc and assign pointers into this
1041   space to represent elements.  (In this case though, you cannot
1042   independently free elements.)
1043 
1044   independent_calloc simplifies and speeds up implementations of many
1045   kinds of pools.  It may also be useful when constructing large data
1046   structures that initially have a fixed number of fixed-sized nodes,
1047   but the number is not known at compile time, and some of the nodes
1048   may later need to be freed. For example:
1049 
1050   struct Node { int item; struct Node* next; };
1051 
1052   struct Node* build_list() {
1053     struct Node** pool;
1054     int n = read_number_of_nodes_needed();
1055     if (n <= 0) return 0;
1056     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1057     if (pool == 0) die();
1058     // organize into a linked list...
1059     struct Node* first = pool[0];
1060     for (int i = 0; i < n-1; ++i)
1061       pool[i]->next = pool[i+1];
1062     free(pool);     // Can now free the array (or not, if it is needed later)
1063     return first;
1064   }
1065 */
1066 #if __STD_C
1067 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1068 #else
1069 Void_t** public_iCALLOc();
1070 #endif
1071 
1072 /*
1073   independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1074 
1075   independent_comalloc allocates, all at once, a set of n_elements
1076   chunks with sizes indicated in the "sizes" array.    It returns
1077   an array of pointers to these elements, each of which can be
1078   independently freed, realloc'ed etc. The elements are guaranteed to
1079   be adjacently allocated (this is not guaranteed to occur with
1080   multiple callocs or mallocs), which may also improve cache locality
1081   in some applications.
1082 
1083   The "chunks" argument is optional (i.e., may be null). If it is null
1084   the returned array is itself dynamically allocated and should also
1085   be freed when it is no longer needed. Otherwise, the chunks array
1086   must be of at least n_elements in length. It is filled in with the
1087   pointers to the chunks.
1088 
1089   In either case, independent_comalloc returns this pointer array, or
1090   null if the allocation failed.  If n_elements is zero and chunks is
1091   null, it returns a chunk representing an array with zero elements
1092   (which should be freed if not wanted).
1093 
1094   Each element must be individually freed when it is no longer
1095   needed. If you'd like to instead be able to free all at once, you
1096   should instead use a single regular malloc, and assign pointers at
1097   particular offsets in the aggregate space. (In this case though, you
1098   cannot independently free elements.)
1099 
1100   independent_comalloc differs from independent_calloc in that each
1101   element may have a different size, and also that it does not
1102   automatically clear elements.
1103 
1104   independent_comalloc can be used to speed up allocation in cases
1105   where several structs or objects must always be allocated at the
1106   same time.  For example:
1107 
1108   struct Head { ... };
1109   struct Foot { ... };
1110 
1111   void send_message(char* msg) {
1112     int msglen = strlen(msg);
1113     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1114     void* chunks[3];
1115     if (independent_comalloc(3, sizes, chunks) == 0)
1116       die();
1117     struct Head* head = (struct Head*)(chunks[0]);
1118     char*        body = (char*)(chunks[1]);
1119     struct Foot* foot = (struct Foot*)(chunks[2]);
1120     // ...
1121   }
1122 
1123   In general though, independent_comalloc is worth using only for
1124   larger values of n_elements. For small values, you probably won't
1125   detect enough difference from a series of malloc calls to bother.
1126 
1127   Overuse of independent_comalloc can increase overall memory usage,
1128   since it cannot reuse existing noncontiguous small chunks that
1129   might be available for some of the elements.
1130 */
1131 #if __STD_C
1132 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1133 #else
1134 Void_t** public_iCOMALLOc();
1135 #endif
1136 
1137 
1138 /*
1139   pvalloc(size_t n);
1140   Equivalent to valloc(minimum-page-that-holds(n)), that is,
1141   round up n to nearest pagesize.
1142  */
1143 #if __STD_C
1144 Void_t*  public_pVALLOc(size_t);
1145 #else
1146 Void_t*  public_pVALLOc();
1147 #endif
1148 
1149 /*
1150   cfree(Void_t* p);
1151   Equivalent to free(p).
1152 
1153   cfree is needed/defined on some systems that pair it with calloc,
1154   for odd historical reasons (such as: cfree is used in example
1155   code in the first edition of K&R).
1156 */
1157 #if __STD_C
1158 void     public_cFREe(Void_t*);
1159 #else
1160 void     public_cFREe();
1161 #endif
1162 
1163 /*
1164   malloc_trim(size_t pad);
1165 
1166   If possible, gives memory back to the system (via negative
1167   arguments to sbrk) if there is unused memory at the `high' end of
1168   the malloc pool. You can call this after freeing large blocks of
1169   memory to potentially reduce the system-level memory requirements
1170   of a program. However, it cannot guarantee to reduce memory. Under
1171   some allocation patterns, some large free blocks of memory will be
1172   locked between two used chunks, so they cannot be given back to
1173   the system.
1174 
1175   The `pad' argument to malloc_trim represents the amount of free
1176   trailing space to leave untrimmed. If this argument is zero,
1177   only the minimum amount of memory to maintain internal data
1178   structures will be left (one page or less). Non-zero arguments
1179   can be supplied to maintain enough trailing space to service
1180   future expected allocations without having to re-obtain memory
1181   from the system.
1182 
1183   Malloc_trim returns 1 if it actually released any memory, else 0.
1184   On systems that do not support "negative sbrks", it will always
1185   return 0.
1186 */
1187 #if __STD_C
1188 int      public_mTRIm(size_t);
1189 #else
1190 int      public_mTRIm();
1191 #endif
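
/*
  Illustrative usage sketch, not part of the original sources: after
  releasing a large working set, ask for trailing free memory to be
  returned to the system while keeping some slack for future requests:

    // ... free() many large blocks ...
    int released = malloc_trim(64 * 1024);   // 1 if memory was given back
*/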
1192 
1193 /*
1194   malloc_usable_size(Void_t* p);
1195 
1196   Returns the number of bytes you can actually use in
1197   an allocated chunk, which may be more than you requested (although
1198   often not) due to alignment and minimum size constraints.
1199   You can use this many bytes without worrying about
1200   overwriting other allocated objects. This is not a particularly great
1201   programming practice. malloc_usable_size can be more useful in
1202   debugging and assertions, for example:
1203 
1204   p = malloc(n);
1205   assert(malloc_usable_size(p) >= 256);
1206 
1207 */
1208 #if __STD_C
1209 size_t   public_mUSABLe(Void_t*);
1210 #else
1211 size_t   public_mUSABLe();
1212 #endif
1213 
1214 /*
1215   malloc_stats();
1216   Prints on stderr the amount of space obtained from the system (both
1217   via sbrk and mmap), the maximum amount (which may be more than
1218   current if malloc_trim and/or munmap got called), and the current
1219   number of bytes allocated via malloc (or realloc, etc) but not yet
1220   freed. Note that this is the number of bytes allocated, not the
1221   number requested. It will be larger than the number requested
1222   because of alignment and bookkeeping overhead. Because it includes
1223   alignment wastage as being in use, this figure may be greater than
1224   zero even when no user-level chunks are allocated.
1225 
1226   The reported current and maximum system memory can be inaccurate if
1227   a program makes other calls to system memory allocation functions
1228   (normally sbrk) outside of malloc.
1229 
1230   malloc_stats prints only the most commonly interesting statistics.
1231   More information can be obtained by calling mallinfo.
1232 
1233 */
1234 #if __STD_C
1235 void     public_mSTATs(void);
1236 #else
1237 void     public_mSTATs();
1238 #endif
1239 
1240 /*
1241   malloc_get_state(void);
1242 
1243   Returns the state of all malloc variables in an opaque data
1244   structure.
1245 */
1246 #if __STD_C
1247 Void_t*  public_gET_STATe(void);
1248 #else
1249 Void_t*  public_gET_STATe();
1250 #endif
1251 
1252 /*
1253   malloc_set_state(Void_t* state);
1254 
1255   Restore the state of all malloc variables from data obtained with
1256   malloc_get_state().
1257 */
1258 #if __STD_C
1259 int      public_sET_STATe(Void_t*);
1260 #else
1261 int      public_sET_STATe();
1262 #endif
1263 
1264 #ifdef _LIBC
1265 /*
1266   posix_memalign(void **memptr, size_t alignment, size_t size);
1267 
1268   POSIX wrapper like memalign(), checking for validity of size.
1269 */
1270 int      __posix_memalign(void **, size_t, size_t);
1271 #endif
1272 
1273 /* mallopt tuning options */
1274 
1275 /*
1276   M_MXFAST is the maximum request size used for "fastbins", special bins
1277   that hold returned chunks without consolidating their spaces. This
1278   enables future requests for chunks of the same size to be handled
1279   very quickly, but can increase fragmentation, and thus increase the
1280   overall memory footprint of a program.
1281 
1282   This malloc manages fastbins very conservatively yet still
1283   efficiently, so fragmentation is rarely a problem for values less
1284   than or equal to the default.  The maximum supported value of MXFAST
1285   is 80. You wouldn't want it any higher than this anyway.  Fastbins
1286   are designed especially for use with many small structs, objects or
1287   strings -- the default handles structs/objects/arrays with sizes up
1288   to 8 4byte fields, or small strings representing words, tokens,
1289   etc. Using fastbins for larger objects normally worsens
1290   fragmentation without improving speed.
1291 
1292   M_MXFAST is set in REQUEST size units. It is internally used in
1293   chunksize units, which adds padding and alignment.  You can reduce
1294   M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
1295   algorithm to be a closer approximation of fifo-best-fit in all cases,
1296   not just for larger requests, but will generally cause it to be
1297   slower.
1298 */
1299 
1300 
1301 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1302 #ifndef M_MXFAST
1303 #define M_MXFAST            1
1304 #endif
1305 
1306 #ifndef DEFAULT_MXFAST
1307 #define DEFAULT_MXFAST     64
1308 #endif
1309 
1310 
1311 /*
1312   M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1313   to keep before releasing via malloc_trim in free().
1314 
1315   Automatic trimming is mainly useful in long-lived programs.
1316   Because trimming via sbrk can be slow on some systems, and can
1317   sometimes be wasteful (in cases where programs immediately
1318   afterward allocate more large chunks) the value should be high
1319   enough so that your overall system performance would improve by
1320   releasing this much memory.
1321 
1322   The trim threshold and the mmap control parameters (see below)
1323   can be traded off with one another. Trimming and mmapping are
1324   two different ways of releasing unused memory back to the
1325   system. Between these two, it is often possible to keep
1326   system-level demands of a long-lived program down to a bare
1327   minimum. For example, in one test suite of sessions measuring
1328   the XF86 X server on Linux, using a trim threshold of 128K and a
1329   mmap threshold of 192K led to near-minimal long term resource
1330   consumption.
1331 
1332   If you are using this malloc in a long-lived program, it should
1333   pay to experiment with these values.  As a rough guide, you
1334   might set it to a value close to the average size of a process
1335   (program) running on your system.  Releasing this much memory
1336   would allow such a process to run in memory.  Generally, it's
1337   worth it to tune for trimming rather than memory mapping when a
1338   program undergoes phases where several large chunks are
1339   allocated and released in ways that can reuse each other's
1340   storage, perhaps mixed with phases where there are no such
1341   chunks at all.  And in well-behaved long-lived programs,
1342   controlling release of large blocks via trimming versus mapping
1343   is usually faster.
1344 
1345   However, in most programs, these parameters serve mainly as
1346   protection against the system-level effects of carrying around
1347   massive amounts of unneeded memory. Since frequent calls to
1348   sbrk, mmap, and munmap otherwise degrade performance, the default
1349   parameters are set to relatively high values that serve only as
1350   safeguards.
1351 
1352   The trim value must be greater than page size to have any useful
1353   effect.  To disable trimming completely, you can set it to
1354   (unsigned long)(-1).
1355 
1356   Trim settings interact with fastbin (MXFAST) settings: Unless
1357   TRIM_FASTBINS is defined, automatic trimming never takes place upon
1358   freeing a chunk with size less than or equal to MXFAST. Trimming is
1359   instead delayed until subsequent freeing of larger chunks. However,
1360   you can still force an attempted trim by calling malloc_trim.
1361 
1362   Also, trimming is not generally possible in cases where
1363   the main arena is obtained via mmap.
1364 
1365   Note that the trick some people use of mallocing a huge space and
1366   then freeing it at program startup, in an attempt to reserve system
1367   memory, doesn't have the intended effect under automatic trimming,
1368   since that memory will immediately be returned to the system.
1369 */
1370 
1371 #define M_TRIM_THRESHOLD       -1
1372 
1373 #ifndef DEFAULT_TRIM_THRESHOLD
1374 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1375 #endif
1376 
1377 /*
1378   M_TOP_PAD is the amount of extra `padding' space to allocate or
1379   retain whenever sbrk is called. It is used in two ways internally:
1380 
1381   * When sbrk is called to extend the top of the arena to satisfy
1382   a new malloc request, this much padding is added to the sbrk
1383   request.
1384 
1385   * When malloc_trim is called automatically from free(),
1386   it is used as the `pad' argument.
1387 
1388   In both cases, the actual amount of padding is rounded
1389   so that the end of the arena is always a system page boundary.
1390 
1391   The main reason for using padding is to avoid calling sbrk so
1392   often. Having even a small pad greatly reduces the likelihood
1393   that nearly every malloc request during program start-up (or
1394   after trimming) will invoke sbrk, which needlessly wastes
1395   time.
1396 
1397   Automatic rounding-up to page-size units is normally sufficient
1398   to avoid measurable overhead, so the default is 0.  However, in
1399   systems where sbrk is relatively slow, it can pay to increase
1400   this value, at the expense of carrying around more memory than
1401   the program needs.
1402 */
1403 
1404 #define M_TOP_PAD              -2
1405 
1406 #ifndef DEFAULT_TOP_PAD
1407 #define DEFAULT_TOP_PAD        (0)
1408 #endif
1409 
1410 /*
1411   M_MMAP_THRESHOLD is the request size threshold for using mmap()
1412   to service a request. Requests of at least this size that cannot
1413   be allocated using already-existing space will be serviced via mmap.
1414   (If enough normal freed space already exists it is used instead.)
1415 
1416   Using mmap segregates relatively large chunks of memory so that
1417   they can be individually obtained and released from the host
1418   system. A request serviced through mmap is never reused by any
1419   other request (at least not directly; the system may just so
1420   happen to remap successive requests to the same locations).
1421 
1422   Segregating space in this way has the benefits that:
1423 
1424    1. Mmapped space can ALWAYS be individually released back
1425       to the system, which helps keep the system level memory
1426       demands of a long-lived program low.
1427    2. Mapped memory can never become `locked' between
1428       other chunks, as can happen with normally allocated chunks, which
1429       means that even trimming via malloc_trim would not release them.
1430    3. On some systems with "holes" in address spaces, mmap can obtain
1431       memory that sbrk cannot.
1432 
1433   However, it has the disadvantages that:
1434 
1435    1. The space cannot be reclaimed, consolidated, and then
1436       used to service later requests, as happens with normal chunks.
1437    2. It can lead to more wastage because of mmap page alignment
1438       requirements
1439    3. It causes malloc performance to be more dependent on host
1440       system memory management support routines which may vary in
1441       implementation quality and may impose arbitrary
1442       limitations. Generally, servicing a request via normal
1443       malloc steps is faster than going through a system's mmap.
1444 
1445   The advantages of mmap nearly always outweigh disadvantages for
1446   "large" chunks, but the value of "large" varies across systems.  The
1447   default is an empirically derived value that works well in most
1448   systems.
1449 */
1450 
1451 #define M_MMAP_THRESHOLD      -3
1452 
1453 #ifndef DEFAULT_MMAP_THRESHOLD
1454 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1455 #endif
1456 
1457 /*
1458   M_MMAP_MAX is the maximum number of requests to simultaneously
1459   service using mmap. This parameter exists because
1460   some systems have a limited number of internal tables for
1461   use by mmap, and using more than a few of them may degrade
1462   performance.
1463 
1464   The default is set to a value that serves only as a safeguard.
1465   Setting to 0 disables use of mmap for servicing large requests.  If
1466   HAVE_MMAP is not set, the default value is 0, and attempts to set it
1467   to non-zero values in mallopt will fail.
1468 */
1469 
1470 #define M_MMAP_MAX             -4
1471 
1472 #ifndef DEFAULT_MMAP_MAX
1473 #if HAVE_MMAP
1474 #define DEFAULT_MMAP_MAX       (65536)
1475 #else
1476 #define DEFAULT_MMAP_MAX       (0)
1477 #endif
1478 #endif
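
/*
  Illustrative sketch (not part of the allocator): a program can tune
  the parameters above at run time through the public mallopt()
  routine, passing one of the M_* identifiers as the first argument.
  The values below are arbitrary examples, not recommendations.

    int tune_allocator_example(void)
    {
      int ok = 1;
      ok &= mallopt(M_TRIM_THRESHOLD, 256 * 1024);
      ok &= mallopt(M_TOP_PAD,        64 * 1024);
      ok &= mallopt(M_MMAP_THRESHOLD, 256 * 1024);
      ok &= mallopt(M_MMAP_MAX,       0);
      return ok;
    }

  mallopt() returns 1 on success and 0 on failure, so the function
  above returns 1 only if every setting was accepted.  As noted above,
  attempts to set M_MMAP_MAX to a non-zero value fail when HAVE_MMAP
  is not defined.
*/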
1479 
1480 #ifdef __cplusplus
1481 } /* end of extern "C" */
1482 #endif
1483 
1484 #include "malloc.h"
1485 
1486 #ifndef BOUNDED_N
1487 #define BOUNDED_N(ptr, sz) (ptr)
1488 #endif
1489 #ifndef RETURN_ADDRESS
1490 #define RETURN_ADDRESS(X_) (NULL)
1491 #endif
1492 
1493 /* On some platforms we can compile internal, not exported functions better.
1494    Let the environment provide a macro and define it to be empty if it
1495    is not available.  */
1496 #ifndef internal_function
1497 # define internal_function
1498 #endif
1499 
1500 /* Forward declarations.  */
1501 struct malloc_chunk;
1502 typedef struct malloc_chunk* mchunkptr;
1503 
1504 /* Internal routines.  */
1505 
1506 #if __STD_C
1507 
1508 Void_t*         _int_malloc(mstate, size_t);
1509 void            _int_free(mstate, Void_t*);
1510 Void_t*         _int_realloc(mstate, Void_t*, size_t);
1511 Void_t*         _int_memalign(mstate, size_t, size_t);
1512 Void_t*         _int_valloc(mstate, size_t);
1513 static Void_t*  _int_pvalloc(mstate, size_t);
1514 /*static Void_t*  cALLOc(size_t, size_t);*/
1515 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1516 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1517 static int      mTRIm(size_t);
1518 static size_t   mUSABLe(Void_t*);
1519 static int      mALLOPt(int, int);
1520 
1521 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1522 static int internal_function top_check(void);
1523 static void internal_function munmap_chunk(mchunkptr p);
1524 #if HAVE_MREMAP
1525 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1526 #endif
1527 
1528 static Void_t*   malloc_check(size_t sz, const Void_t *caller);
1529 static void      free_check(Void_t* mem, const Void_t *caller);
1530 static Void_t*   realloc_check(Void_t* oldmem, size_t bytes,
1531 			       const Void_t *caller);
1532 static Void_t*   memalign_check(size_t alignment, size_t bytes,
1533 				const Void_t *caller);
1534 #ifndef NO_THREADS
1535 # if USE_STARTER
1536 static Void_t*   malloc_starter(size_t sz, const Void_t *caller);
1537 static Void_t*   memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1538 static void      free_starter(Void_t* mem, const Void_t *caller);
1539 # endif
1540 static Void_t*   malloc_atfork(size_t sz, const Void_t *caller);
1541 static void      free_atfork(Void_t* mem, const Void_t *caller);
1542 #endif
1543 
1544 #else
1545 
1546 Void_t*         _int_malloc();
1547 void            _int_free();
1548 Void_t*         _int_realloc();
1549 Void_t*         _int_memalign();
1550 Void_t*         _int_valloc();
1551 Void_t*         _int_pvalloc();
1552 /*static Void_t*  cALLOc();*/
1553 static Void_t** _int_icalloc();
1554 static Void_t** _int_icomalloc();
1555 static int      mTRIm();
1556 static size_t   mUSABLe();
1557 static int      mALLOPt();
1558 
1559 #endif
1560 
1561 
1562 
1563 
1564 /* ------------- Optional versions of memcopy ---------------- */
1565 
1566 
1567 #if USE_MEMCPY
1568 
1569 /*
1570   Note: memcpy is ONLY invoked with non-overlapping regions,
1571   so the (usually slower) memmove is not needed.
1572 */
1573 
1574 #define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
1575 #define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
1576 
1577 #else /* !USE_MEMCPY */
1578 
1579 /* Use Duff's device for good zeroing/copying performance. */
1580 
1581 #define MALLOC_ZERO(charp, nbytes)                                            \
1582 do {                                                                          \
1583   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
1584   unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
1585   long mcn;                                                                   \
1586   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
1587   switch (mctmp) {                                                            \
1588     case 0: for(;;) { *mzp++ = 0;                                             \
1589     case 7:           *mzp++ = 0;                                             \
1590     case 6:           *mzp++ = 0;                                             \
1591     case 5:           *mzp++ = 0;                                             \
1592     case 4:           *mzp++ = 0;                                             \
1593     case 3:           *mzp++ = 0;                                             \
1594     case 2:           *mzp++ = 0;                                             \
1595     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
1596   }                                                                           \
1597 } while(0)
1598 
1599 #define MALLOC_COPY(dest,src,nbytes)                                          \
1600 do {                                                                          \
1601   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
1602   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
1603   unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
1604   long mcn;                                                                   \
1605   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
1606   switch (mctmp) {                                                            \
1607     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
1608     case 7:           *mcdst++ = *mcsrc++;                                    \
1609     case 6:           *mcdst++ = *mcsrc++;                                    \
1610     case 5:           *mcdst++ = *mcsrc++;                                    \
1611     case 4:           *mcdst++ = *mcsrc++;                                    \
1612     case 3:           *mcdst++ = *mcsrc++;                                    \
1613     case 2:           *mcdst++ = *mcsrc++;                                    \
1614     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
1615   }                                                                           \
1616 } while(0)
1617 
1618 #endif
1619 
1620 /* ------------------ MMAP support ------------------  */
1621 
1622 
1623 #if HAVE_MMAP
1624 
1625 #include <fcntl.h>
1626 #ifndef LACKS_SYS_MMAN_H
1627 #include <sys/mman.h>
1628 #endif
1629 
1630 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1631 # define MAP_ANONYMOUS MAP_ANON
1632 #endif
1633 #if !defined(MAP_FAILED)
1634 # define MAP_FAILED ((char*)-1)
1635 #endif
1636 
1637 #ifndef MAP_NORESERVE
1638 # ifdef MAP_AUTORESRV
1639 #  define MAP_NORESERVE MAP_AUTORESRV
1640 # else
1641 #  define MAP_NORESERVE 0
1642 # endif
1643 #endif
1644 
1645 /*
1646    Nearly all versions of mmap support MAP_ANONYMOUS,
1647    so the following is unlikely to be needed, but is
1648    supplied just in case.
1649 */
1650 
1651 #ifndef MAP_ANONYMOUS
1652 
1653 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1654 
1655 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1656  (dev_zero_fd = open("/dev/zero", O_RDWR), \
1657   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1658    mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1659 
1660 #else
1661 
1662 #define MMAP(addr, size, prot, flags) \
1663  (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1664 
1665 #endif
1666 
1667 
1668 #endif /* HAVE_MMAP */
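
/*
  Usage sketch (illustration only; the real call sites appear in the
  system-allocation code further below): MMAP is invoked to obtain
  anonymous, zero-filled pages, with failure reported as MAP_FAILED.

    char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
    if (mm != MAP_FAILED) {
      ... use the freshly mapped region of `size' bytes ...
    }
*/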
1669 
1670 
1671 /*
1672   -----------------------  Chunk representations -----------------------
1673 */
1674 
1675 
1676 /*
1677   This struct declaration is misleading (but accurate and necessary).
1678   It declares a "view" into memory allowing access to necessary
1679   fields at known offsets from a given base. See explanation below.
1680 */
1681 
1682 struct malloc_chunk {
1683 
1684   INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
1685   INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
1686 
1687   struct malloc_chunk* fd;         /* double links -- used only if free. */
1688   struct malloc_chunk* bk;
1689 };
1690 
1691 
1692 /*
1693    malloc_chunk details:
1694 
1695     (The following includes lightly edited explanations by Colin Plumb.)
1696 
1697     Chunks of memory are maintained using a `boundary tag' method as
1698     described in e.g., Knuth or Standish.  (See the paper by Paul
1699     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1700     survey of such techniques.)  Sizes of free chunks are stored both
1701     in the front of each chunk and at the end.  This makes
1702     consolidating fragmented chunks into bigger chunks very fast.  The
1703     size fields also hold bits representing whether chunks are free or
1704     in use.
1705 
1706     An allocated chunk looks like this:
1707 
1708 
1709     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1710             |             Size of previous chunk, if allocated            | |
1711             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1712             |             Size of chunk, in bytes                         |P|
1713       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1714             |             User data starts here...                          .
1715             .                                                               .
1716             .             (malloc_usable_space() bytes)                     .
1717             .                                                               |
1718 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1719             |             Size of chunk                                     |
1720             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1721 
1722 
1723     Where "chunk" is the front of the chunk for the purpose of most of
1724     the malloc code, but "mem" is the pointer that is returned to the
1725     user.  "Nextchunk" is the beginning of the next contiguous chunk.
1726 
1727     Chunks always begin on even word boundaries, so the mem portion
1728     (which is returned to the user) is also on an even word boundary, and
1729     thus at least double-word aligned.
1730 
1731     Free chunks are stored in circular doubly-linked lists, and look like this:
1732 
1733     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1734             |             Size of previous chunk                            |
1735             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1736     `head:' |             Size of chunk, in bytes                         |P|
1737       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1738             |             Forward pointer to next chunk in list             |
1739             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1740             |             Back pointer to previous chunk in list            |
1741             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1742             |             Unused space (may be 0 bytes long)                .
1743             .                                                               .
1744             .                                                               |
1745 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1746     `foot:' |             Size of chunk, in bytes                           |
1747             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1748 
1749     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1750     chunk size (which is always a multiple of two words), is an in-use
1751     bit for the *previous* chunk.  If that bit is *clear*, then the
1752     word before the current chunk size contains the previous chunk
1753     size, and can be used to find the front of the previous chunk.
1754     The very first chunk allocated always has this bit set,
1755     preventing access to non-existent (or non-owned) memory. If
1756     prev_inuse is set for any given chunk, then you CANNOT determine
1757     the size of the previous chunk, and might even get a memory
1758     addressing fault when trying to do so.
1759 
1760     Note that the `foot' of the current chunk is actually represented
1761     as the prev_size of the NEXT chunk. This makes it easier to
1762     deal with alignments etc but can be very confusing when trying
1763     to extend or adapt this code.
1764 
1765     The two exceptions to all this are
1766 
1767      1. The special chunk `top' doesn't bother using the
1768         trailing size field since there is no next contiguous chunk
1769         that would have to index off it. After initialization, `top'
1770         is forced to always exist.  If it would become less than
1771         MINSIZE bytes long, it is replenished.
1772 
1773      2. Chunks allocated via mmap, which have the second-lowest-order
1774         bit (IS_MMAPPED) set in their size fields.  Because they are
1775         allocated one-by-one, each must contain its own trailing size field.
1776 
1777 */
1778 
1779 /*
1780   ---------- Size and alignment checks and conversions ----------
1781 */
1782 
1783 /* conversion from malloc headers to user pointers, and back */
1784 
1785 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1786 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
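
/*
  For illustration (using only the definitions above): chunk2mem and
  mem2chunk are exact inverses, and the user pointer lies 2*SIZE_SZ
  bytes past the chunk base, just beyond the prev_size and size fields.

    mchunkptr p;                                  some chunk header
    Void_t* mem = chunk2mem(p);                   the pointer malloc would return
    assert(mem2chunk(mem) == p);                  round trip recovers the header
    assert((char*)mem - (char*)p == 2*SIZE_SZ);
*/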
1787 
1788 /* The smallest possible chunk */
1789 #define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))
1790 
1791 /* The smallest size we can malloc is an aligned minimal chunk */
1792 
1793 #define MINSIZE  \
1794   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1795 
1796 /* Check if m has acceptable alignment */
1797 
1798 #define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1799 
1800 
1801 /*
1802    Check if a request is so large that it would wrap around zero when
1803    padded and aligned. To simplify some other code, the bound is made
1804    low enough so that adding MINSIZE will also not wrap around zero.
1805 */
1806 
1807 #define REQUEST_OUT_OF_RANGE(req)                                 \
1808   ((unsigned long)(req) >=                                        \
1809    (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1810 
1811 /* pad request bytes into a usable size -- internal version */
1812 
1813 #define request2size(req)                                         \
1814   (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
1815    MINSIZE :                                                      \
1816    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1817 
1818 /*  Same, except also perform argument check */
1819 
1820 #define checked_request2size(req, sz)                             \
1821   if (REQUEST_OUT_OF_RANGE(req)) {                                \
1822     MALLOC_FAILURE_ACTION;                                        \
1823     return 0;                                                     \
1824   }                                                               \
1825   (sz) = request2size(req);
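
/*
  Worked example (assuming 4-byte pointers and sizes, so SIZE_SZ == 4,
  MALLOC_ALIGNMENT == 8 and MINSIZE == 16):

    request2size(0)  == 16    below MINSIZE, so bumped up to MINSIZE
    request2size(20) == 24    20 + 4 bytes overhead, rounded to a multiple of 8
    request2size(24) == 32    24 + 4 bytes overhead, rounded to a multiple of 8

  checked_request2size additionally rejects requests so large that
  padding would wrap around zero, invoking MALLOC_FAILURE_ACTION and
  returning 0 from the enclosing function.
*/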
1826 
1827 /*
1828   --------------- Physical chunk operations ---------------
1829 */
1830 
1831 
1832 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1833 #define PREV_INUSE 0x1
1834 
1835 /* extract inuse bit of previous chunk */
1836 #define prev_inuse(p)       ((p)->size & PREV_INUSE)
1837 
1838 
1839 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1840 #define IS_MMAPPED 0x2
1841 
1842 /* check for mmap()'ed chunk */
1843 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1844 
1845 
1846 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1847    from a non-main arena.  This is only set immediately before handing
1848    the chunk to the user, if necessary.  */
1849 #define NON_MAIN_ARENA 0x4
1850 
1851 /* check for chunk from non-main arena */
1852 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1853 
1854 
1855 /*
1856   Bits to mask off when extracting size
1857 
1858   Note: IS_MMAPPED is intentionally not masked off from size field in
1859   macros for which mmapped chunks should never be seen. This should
1860   cause helpful core dumps to occur if it is tried by accident by
1861   people extending or adapting this malloc.
1862 */
1863 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1864 
1865 /* Get size, ignoring use bits */
1866 #define chunksize(p)         ((p)->size & ~(SIZE_BITS))
1867 
1868 
1869 /* Ptr to next physical malloc_chunk. */
1870 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1871 
1872 /* Ptr to previous physical malloc_chunk */
1873 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1874 
1875 /* Treat space at ptr + offset as a chunk */
1876 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
1877 
1878 /* extract p's inuse bit */
1879 #define inuse(p)\
1880 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1881 
1882 /* set/clear chunk as being inuse without otherwise disturbing */
1883 #define set_inuse(p)\
1884 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1885 
1886 #define clear_inuse(p)\
1887 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1888 
1889 
1890 /* check/set/clear inuse bits in known places */
1891 #define inuse_bit_at_offset(p, s)\
1892  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1893 
1894 #define set_inuse_bit_at_offset(p, s)\
1895  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1896 
1897 #define clear_inuse_bit_at_offset(p, s)\
1898  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1899 
1900 
1901 /* Set size at head, without disturbing its use bit */
1902 #define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1903 
1904 /* Set size/use field */
1905 #define set_head(p, s)       ((p)->size = (s))
1906 
1907 /* Set size at footer (only when chunk is not in use) */
1908 #define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
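
/*
  Illustrative sketch (hypothetical local names; modelled on, but not
  copied from, the allocation code further below).  When a free chunk
  `victim' of size `size' is split to satisfy a request of `nb' bytes,
  the macros above combine like this:

    unsigned long remainder_size = size - nb;
    mchunkptr remainder = chunk_at_offset(victim, nb);

    set_head(victim, nb | PREV_INUSE);                 front part is now in use
    set_head(remainder, remainder_size | PREV_INUSE);  its predecessor (victim) is in use
    set_foot(remainder, remainder_size);               remainder stays free, so it gets a foot
*/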
1909 
1910 
1911 /*
1912   -------------------- Internal data structures --------------------
1913 
1914    All internal state is held in an instance of malloc_state defined
1915    below. There are no other static variables, except in two optional
1916    cases:
1917    * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1918    * If HAVE_MMAP is true, but mmap doesn't support
1919      MAP_ANONYMOUS, a dummy file descriptor for mmap.
1920 
1921    Beware of lots of tricks that minimize the total bookkeeping space
1922    requirements. The result is a little over 1K bytes (for 4-byte
1923    pointers and size_t.)
1924 */
1925 
1926 /*
1927   Bins
1928 
1929     An array of bin headers for free chunks. Each bin is doubly
1930     linked.  The bins are approximately proportionally (log) spaced.
1931     There are a lot of these bins (128). This may look excessive, but
1932     works very well in practice.  Most bins hold sizes that are
1933     unusual as malloc request sizes, but are more usual for fragments
1934     and consolidated sets of chunks, which is what these bins hold, so
1935     they can be found quickly.  All procedures maintain the invariant
1936     that no consolidated chunk physically borders another one, so each
1937     chunk in a list is known to be preceded and followed by either
1938     inuse chunks or the ends of memory.
1939 
1940     Chunks in bins are kept in size order, with ties going to the
1941     approximately least recently used chunk. Ordering isn't needed
1942     for the small bins, which all contain the same-sized chunks, but
1943     facilitates best-fit allocation for larger chunks. These lists
1944     are just sequential. Keeping them in order almost never requires
1945     enough traversal to warrant using fancier ordered data
1946     structures.
1947 
1948     Chunks of the same size are linked with the most
1949     recently freed at the front, and allocations are taken from the
1950     back.  This results in LRU (FIFO) allocation order, which tends
1951     to give each chunk an equal opportunity to be consolidated with
1952     adjacent freed chunks, resulting in larger free chunks and less
1953     fragmentation.
1954 
1955     To simplify use in double-linked lists, each bin header acts
1956     as a malloc_chunk. This avoids special-casing for headers.
1957     But to conserve space and improve locality, we allocate
1958     only the fd/bk pointers of bins, and then use repositioning tricks
1959     to treat these as the fields of a malloc_chunk*.
1960 */
1961 
1962 typedef struct malloc_chunk* mbinptr;
1963 
1964 /* addressing -- note that bin_at(0) does not exist */
1965 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1966 
1967 /* analog of ++bin */
1968 #define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1969 
1970 /* Reminders about list directionality within bins */
1971 #define first(b)     ((b)->fd)
1972 #define last(b)      ((b)->bk)
1973 
1974 /* Take a chunk off a bin list */
1975 #define unlink(P, BK, FD) {                                            \
1976   FD = P->fd;                                                          \
1977   BK = P->bk;                                                          \
1978   FD->bk = BK;                                                         \
1979   BK->fd = FD;                                                         \
1980 }
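
/*
  Usage sketch (hypothetical names, for illustration only): taking a
  chunk off whichever circular bin list it is on.

    mchunkptr victim = last(bin);    take from the back, giving FIFO order
    mchunkptr bck, fwd;
    unlink(victim, bck, fwd);        splice victim out of its list
*/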
1981 
1982 /*
1983   Indexing
1984 
1985     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1986     8 bytes apart. Larger bins are approximately logarithmically spaced:
1987 
1988     64 bins of size       8
1989     32 bins of size      64
1990     16 bins of size     512
1991      8 bins of size    4096
1992      4 bins of size   32768
1993      2 bins of size  262144
1994      1 bin  of size what's left
1995 
1996     There is actually a little bit of slop in the numbers in bin_index
1997     for the sake of speed. This makes no difference elsewhere.
1998 
1999     The bins top out around 1MB because we expect to service large
2000     requests via mmap.
2001 */
2002 
2003 #define NBINS             128
2004 #define NSMALLBINS         64
2005 #define SMALLBIN_WIDTH      8
2006 #define MIN_LARGE_SIZE    512
2007 
2008 #define in_smallbin_range(sz)  \
2009   ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2010 
2011 #define smallbin_index(sz)     (((unsigned)(sz)) >> 3)
2012 
2013 #define largebin_index(sz)                                                   \
2014 (((((unsigned long)(sz)) >>  6) <= 32)?  56 + (((unsigned long)(sz)) >>  6): \
2015  ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
2016  ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2017  ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
2018  ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
2019                                         126)
2020 
2021 #define bin_index(sz) \
2022  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
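
/*
  Worked examples of the indexing above (arguments are chunk sizes,
  i.e. already padded by request2size):

    bin_index(16)    ==   2    small bin:  16 >> 3
    bin_index(504)   ==  63    small bin: 504 >> 3
    bin_index(512)   ==  64    large bin: 56 + (512 >> 6)
    bin_index(1024)  ==  72    large bin: 56 + (1024 >> 6)
    bin_index(16384) == 114    large bin: 110 + (16384 >> 12)
*/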
2023 
2024 /*
2025   FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2026   first bin that is maintained in sorted order. This must
2027   be the smallest size corresponding to a given bin.
2028 
2029   Normally, this should be MIN_LARGE_SIZE. But you can weaken
2030   best fit guarantees to sometimes speed up malloc by increasing this value.
2031   Doing this means that malloc may choose a chunk that is
2032   non-best-fitting by up to the width of the bin.
2033 
2034   Some useful cutoff values:
2035       512 - all bins sorted
2036      2560 - leaves bins <=     64 bytes wide unsorted
2037     12288 - leaves bins <=    512 bytes wide unsorted
2038     65536 - leaves bins <=   4096 bytes wide unsorted
2039    262144 - leaves bins <=  32768 bytes wide unsorted
2040        -1 - no bins sorted (not recommended!)
2041 */
2042 
2043 #define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2044 /* #define FIRST_SORTED_BIN_SIZE 65536 */
2045 
2046 /*
2047   Unsorted chunks
2048 
2049     All remainders from chunk splits, as well as all returned chunks,
2050     are first placed in the "unsorted" bin. They are then placed
2051     in regular bins after malloc gives them ONE chance to be used before
2052     binning. So, basically, the unsorted_chunks list acts as a queue,
2053     with chunks being placed on it in free (and malloc_consolidate),
2054     and taken off (to be either used or placed in bins) in malloc.
2055 
2056     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2057     does not have to be taken into account in size comparisons.
2058 */
2059 
2060 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2061 #define unsorted_chunks(M)          (bin_at(M, 1))
2062 
2063 /*
2064   Top
2065 
2066     The top-most available chunk (i.e., the one bordering the end of
2067     available memory) is treated specially. It is never included in
2068     any bin, is used only if no other chunk is available, and is
2069     released back to the system if it is very large (see
2070     M_TRIM_THRESHOLD).  Because top initially
2071     points to its own bin with initial zero size, thus forcing
2072     extension on the first malloc request, we avoid having any special
2073     code in malloc to check whether it even exists yet. But we still
2074     need to do so when getting memory from the system, so we make
2075     initial_top treat the bin as a legal but unusable chunk during the
2076     interval between initialization and the first call to
2077     sYSMALLOc. (This is somewhat delicate, since it relies on
2078     the 2 preceding words to be zero during this interval as well.)
2079 */
2080 
2081 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2082 #define initial_top(M)              (unsorted_chunks(M))
2083 
2084 /*
2085   Binmap
2086 
2087     To help compensate for the large number of bins, a one-level index
2088     structure is used for bin-by-bin searching.  `binmap' is a
2089     bitvector recording whether bins are definitely empty so they can
2090     be skipped over during traversals.  The bits are NOT always
2091     cleared as soon as bins are empty, but instead only
2092     when they are noticed to be empty during traversal in malloc.
2093 */
2094 
2095 /* Conservatively use 32 bits per map word, even if on 64bit system */
2096 #define BINMAPSHIFT      5
2097 #define BITSPERMAP       (1U << BINMAPSHIFT)
2098 #define BINMAPSIZE       (NBINS / BITSPERMAP)
2099 
2100 #define idx2block(i)     ((i) >> BINMAPSHIFT)
2101 #define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2102 
2103 #define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
2104 #define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2105 #define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
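
/*
  Worked example: for bin index i == 70,

    idx2block(70) == 70 >> 5         == 2
    idx2bit(70)   == 1U << (70 & 31) == 1U << 6

  so mark_bin(m, 70) sets bit 6 of m->binmap[2], and get_binmap(m, 70)
  tests that same bit.
*/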
2106 
2107 /*
2108   Fastbins
2109 
2110     An array of lists holding recently freed small chunks.  Fastbins
2111     are not doubly linked.  It is faster to single-link them, and
2112     since chunks are never removed from the middles of these lists,
2113     double linking is not necessary. Also, unlike regular bins, they
2114     are not even processed in FIFO order (they use faster LIFO) since
2115     ordering doesn't much matter in the transient contexts in which
2116     fastbins are normally used.
2117 
2118     Chunks in fastbins keep their inuse bit set, so they cannot
2119     be consolidated with other free chunks. malloc_consolidate
2120     releases all chunks in fastbins and consolidates them with
2121     other free chunks.
2122 */
2123 
2124 typedef struct malloc_chunk* mfastbinptr;
2125 
2126 /* offset 2 to use otherwise unindexable first 2 bins */
2127 #define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
2128 
2129 /* The maximum fastbin request size we support */
2130 #define MAX_FAST_SIZE     80
2131 
2132 #define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
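
/*
  Worked example (assuming 4-byte SIZE_SZ and 8-byte alignment): the
  smallest chunk size, MINSIZE == 16, maps to fastbin_index(16) == 0,
  a 24-byte chunk to index 1, and so on in 8-byte steps.  Since
  request2size(MAX_FAST_SIZE) == request2size(80) == 88, NFASTBINS ==
  fastbin_index(88) + 1 == 10.
*/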
2133 
2134 /*
2135   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2136   that triggers automatic consolidation of possibly-surrounding
2137   fastbin chunks. This is a heuristic, so the exact value should not
2138   matter too much. It is defined at half the default trim threshold as a
2139   compromise heuristic to only attempt consolidation if it is likely
2140   to lead to trimming. However, it is not dynamically tunable, since
2141   consolidation reduces fragmentation surrounding large chunks even
2142   if trimming is not used.
2143 */
2144 
2145 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
2146 
2147 /*
2148   Since the lowest 2 bits in max_fast don't matter in size comparisons,
2149   they are used as flags.
2150 */
2151 
2152 /*
2153   FASTCHUNKS_BIT held in max_fast indicates that there are probably
2154   some fastbin chunks. It is set true on entering a chunk into any
2155   fastbin, and cleared only in malloc_consolidate.
2156 
2157   The truth value is inverted so that have_fastchunks will be true
2158   upon startup (since statics are zero-filled), simplifying
2159   initialization checks.
2160 */
2161 
2162 #define FASTCHUNKS_BIT        (1U)
2163 
2164 #define have_fastchunks(M)     (((M)->max_fast &  FASTCHUNKS_BIT) == 0)
2165 #define clear_fastchunks(M)    ((M)->max_fast |=  FASTCHUNKS_BIT)
2166 #define set_fastchunks(M)      ((M)->max_fast &= ~FASTCHUNKS_BIT)
2167 
2168 /*
2169   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2170   regions.  Otherwise, contiguity is exploited in merging together,
2171   when possible, results from consecutive MORECORE calls.
2172 
2173   The initial value comes from MORECORE_CONTIGUOUS, but is
2174   changed dynamically if mmap is ever used as an sbrk substitute.
2175 */
2176 
2177 #define NONCONTIGUOUS_BIT     (2U)
2178 
2179 #define contiguous(M)          (((M)->max_fast &  NONCONTIGUOUS_BIT) == 0)
2180 #define noncontiguous(M)       (((M)->max_fast &  NONCONTIGUOUS_BIT) != 0)
2181 #define set_noncontiguous(M)   ((M)->max_fast |=  NONCONTIGUOUS_BIT)
2182 #define set_contiguous(M)      ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
2183 
2184 /*
2185    Set value of max_fast.
2186    Use impossibly small value if 0.
2187    Precondition: there are no existing fastbin chunks.
2188    Setting the value clears fastchunk bit but preserves noncontiguous bit.
2189 */
2190 
2191 #define set_max_fast(M, s) \
2192   (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2193   FASTCHUNKS_BIT | \
2194   ((M)->max_fast &  NONCONTIGUOUS_BIT)
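
/*
  For illustration: on a freshly zeroed arena, set_max_fast(M, 64)
  leaves FASTCHUNKS_BIT set (meaning "probably no fastbin chunks yet")
  and NONCONTIGUOUS_BIT unchanged, so immediately afterwards:

    set_max_fast(M, 64);
    assert(!have_fastchunks(M));
    assert(((M)->max_fast & ~(FASTCHUNKS_BIT|NONCONTIGUOUS_BIT))
           == request2size(64));

  The threshold proper is recovered by masking off the two flag bits;
  have_fastchunks(M) becomes true only once set_fastchunks(M) runs
  when a chunk is placed into a fastbin.
*/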
2195 
2196 
2197 /*
2198    ----------- Internal state representation and initialization -----------
2199 */
2200 
2201 struct malloc_state {
2202   /* Serialize access.  */
2203   mutex_t mutex;
2204 
2205   /* Statistics for locking.  Only used if THREAD_STATS is defined.  */
2206   long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2207   long pad0_[1]; /* try to give the mutex its own cacheline */
2208 
2209   /* The maximum chunk size to be eligible for fastbin */
2210   INTERNAL_SIZE_T  max_fast;   /* low 2 bits used as flags */
2211 
2212   /* Fastbins */
2213   mfastbinptr      fastbins[NFASTBINS];
2214 
2215   /* Base of the topmost chunk -- not otherwise kept in a bin */
2216   mchunkptr        top;
2217 
2218   /* The remainder from the most recent split of a small request */
2219   mchunkptr        last_remainder;
2220 
2221   /* Normal bins packed as described above */
2222   mchunkptr        bins[NBINS * 2];
2223 
2224   /* Bitmap of bins */
2225   unsigned int     binmap[BINMAPSIZE];
2226 
2227   /* Linked list */
2228   struct malloc_state *next;
2229 
2230   /* Memory allocated from the system in this arena.  */
2231   INTERNAL_SIZE_T system_mem;
2232   INTERNAL_SIZE_T max_system_mem;
2233 };
2234 
2235 struct malloc_par {
2236   /* Tunable parameters */
2237   unsigned long    trim_threshold;
2238   INTERNAL_SIZE_T  top_pad;
2239   INTERNAL_SIZE_T  mmap_threshold;
2240 
2241   /* Memory map support */
2242   int              n_mmaps;
2243   int              n_mmaps_max;
2244   int              max_n_mmaps;
2245 
2246   /* Cache malloc_getpagesize */
2247   unsigned int     pagesize;
2248 
2249   /* Statistics */
2250   INTERNAL_SIZE_T  mmapped_mem;
2251   /*INTERNAL_SIZE_T  sbrked_mem;*/
2252   /*INTERNAL_SIZE_T  max_sbrked_mem;*/
2253   INTERNAL_SIZE_T  max_mmapped_mem;
2254   INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
2255 
2256   /* First address handed out by MORECORE/sbrk.  */
2257   char*            sbrk_base;
2258 };
2259 
2260 /* There are several instances of this struct ("arenas") in this
2261    malloc.  If you are adapting this malloc in a way that does NOT use
2262    a static or mmapped malloc_state, you MUST explicitly zero-fill it
2263    before using. This malloc relies on the property that malloc_state
2264    is initialized to all zeroes (as is true of C statics).  */
2265 
2266 static struct malloc_state main_arena;
2267 
2268 /* There is only one instance of the malloc parameters.  */
2269 
2270 static struct malloc_par mp_;
2271 
2272 /*
2273   Initialize a malloc_state struct.
2274 
2275   This is called only from within malloc_consolidate, which needs
2276   to be called in the same contexts anyway.  It is never called directly
2277   outside of malloc_consolidate because some optimizing compilers try
2278   to inline it at all call points, which turns out not to be an
2279   optimization at all. (Inlining it in malloc_consolidate is fine though.)
2280 */
2281 
2282 #if __STD_C
2283 static void malloc_init_state(mstate av)
2284 #else
2285 static void malloc_init_state(av) mstate av;
2286 #endif
2287 {
2288   int     i;
2289   mbinptr bin;
2290 
2291   /* Establish circular links for normal bins */
2292   for (i = 1; i < NBINS; ++i) {
2293     bin = bin_at(av,i);
2294     bin->fd = bin->bk = bin;
2295   }
2296 
2297 #if MORECORE_CONTIGUOUS
2298   if (av != &main_arena)
2299 #endif
2300     set_noncontiguous(av);
2301 
2302   set_max_fast(av, DEFAULT_MXFAST);
2303 
2304   av->top            = initial_top(av);
2305 }
2306 
2307 /*
2308    Other internal utilities operating on mstates
2309 */
2310 
2311 #if __STD_C
2312 static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
2313 static int      sYSTRIm(size_t, mstate);
2314 static void     malloc_consolidate(mstate);
2315 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2316 #else
2317 static Void_t*  sYSMALLOc();
2318 static int      sYSTRIm();
2319 static void     malloc_consolidate();
2320 static Void_t** iALLOc();
2321 #endif
2322 
2323 
2324 /* -------------- Early definitions for debugging hooks ---------------- */
2325 
2326 /* Define and initialize the hook variables.  These weak definitions must
2327    appear before any use of the variables in a function (arena.c uses one).  */
2328 #ifndef weak_variable
2329 #ifndef _LIBC
2330 #define weak_variable /**/
2331 #else
2332 /* In GNU libc we want the hook variables to be weak definitions to
2333    avoid a problem with Emacs.  */
2334 #define weak_variable weak_function
2335 #endif
2336 #endif
2337 
2338 #if !(USE_STARTER & 2)
2339 # define free_hook_ini     NULL
2340 /* Forward declarations.  */
2341 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2342 					    const __malloc_ptr_t caller));
2343 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2344 					     const __malloc_ptr_t caller));
2345 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2346 					      const __malloc_ptr_t caller));
2347 #else
2348 # define free_hook_ini     free_starter
2349 # define malloc_hook_ini   malloc_starter
2350 # define realloc_hook_ini  NULL
2351 # define memalign_hook_ini memalign_starter
2352 #endif
2353 
2354 void weak_variable (*__malloc_initialize_hook) __MALLOC_PMT ((void)) = NULL;
2355 void weak_variable (*__free_hook) __MALLOC_PMT ((__malloc_ptr_t __ptr,
2356                                                  const __malloc_ptr_t))
2357      = free_hook_ini;
2358 __malloc_ptr_t weak_variable (*__malloc_hook)
2359  __MALLOC_PMT ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
2360 __malloc_ptr_t weak_variable (*__realloc_hook)
2361  __MALLOC_PMT ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
2362      = realloc_hook_ini;
2363 __malloc_ptr_t weak_variable (*__memalign_hook)
2364  __MALLOC_PMT ((size_t __alignment, size_t __size, const __malloc_ptr_t))
2365      = memalign_hook_ini;
2366 void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
2367 
2368 
2369 /* ------------------- Support for multiple arenas -------------------- */
2370 #include "arena.c"
2371 
2372 /*
2373   Debugging support
2374 
2375   These routines make a number of assertions about the states
2376   of data structures that should be true at all times. If any
2377   are not true, it's very likely that a user program has somehow
2378   trashed memory. (It's also possible that there is a coding error
2379   in malloc. In which case, please report it!)
2380 */
2381 
2382 #if ! MALLOC_DEBUG
2383 
2384 #define check_chunk(A,P)
2385 #define check_free_chunk(A,P)
2386 #define check_inuse_chunk(A,P)
2387 #define check_remalloced_chunk(A,P,N)
2388 #define check_malloced_chunk(A,P,N)
2389 #define check_malloc_state(A)
2390 
2391 #else
2392 
2393 #define check_chunk(A,P)              do_check_chunk(A,P)
2394 #define check_free_chunk(A,P)         do_check_free_chunk(A,P)
2395 #define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
2396 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2397 #define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
2398 #define check_malloc_state(A)         do_check_malloc_state(A)
2399 
2400 /*
2401   Properties of all chunks
2402 */
2403 
2404 #if __STD_C
2405 static void do_check_chunk(mstate av, mchunkptr p)
2406 #else
2407 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2408 #endif
2409 {
2410   unsigned long sz = chunksize(p);
2411   /* min and max possible addresses assuming contiguous allocation */
2412   char* max_address = (char*)(av->top) + chunksize(av->top);
2413   char* min_address = max_address - av->system_mem;
2414 
2415   if (!chunk_is_mmapped(p)) {
2416 
2417     /* Has legal address ... */
2418     if (p != av->top) {
2419       if (contiguous(av)) {
2420         assert(((char*)p) >= min_address);
2421         assert(((char*)p + sz) <= ((char*)(av->top)));
2422       }
2423     }
2424     else {
2425       /* top size is always at least MINSIZE */
2426       assert((unsigned long)(sz) >= MINSIZE);
2427       /* top predecessor always marked inuse */
2428       assert(prev_inuse(p));
2429     }
2430 
2431   }
2432   else {
2433 #if HAVE_MMAP
2434     /* address is outside main heap  */
2435     if (contiguous(av) && av->top != initial_top(av)) {
2436       assert(((char*)p) < min_address || ((char*)p) > max_address);
2437     }
2438     /* chunk is page-aligned */
2439     assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2440     /* mem is aligned */
2441     assert(aligned_OK(chunk2mem(p)));
2442 #else
2443     /* force an appropriate assert violation if debug set */
2444     assert(!chunk_is_mmapped(p));
2445 #endif
2446   }
2447 }
2448 
2449 /*
2450   Properties of free chunks
2451 */
2452 
2453 #if __STD_C
2454 static void do_check_free_chunk(mstate av, mchunkptr p)
2455 #else
2456 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2457 #endif
2458 {
2459   INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2460   mchunkptr next = chunk_at_offset(p, sz);
2461 
2462   do_check_chunk(av, p);
2463 
2464   /* Chunk must claim to be free ... */
2465   assert(!inuse(p));
2466   assert (!chunk_is_mmapped(p));
2467 
2468   /* Unless a special marker, must have OK fields */
2469   if ((unsigned long)(sz) >= MINSIZE)
2470   {
2471     assert((sz & MALLOC_ALIGN_MASK) == 0);
2472     assert(aligned_OK(chunk2mem(p)));
2473     /* ... matching footer field */
2474     assert(next->prev_size == sz);
2475     /* ... and is fully consolidated */
2476     assert(prev_inuse(p));
2477     assert (next == av->top || inuse(next));
2478 
2479     /* ... and has minimally sane links */
2480     assert(p->fd->bk == p);
2481     assert(p->bk->fd == p);
2482   }
2483   else /* markers are always of size SIZE_SZ */
2484     assert(sz == SIZE_SZ);
2485 }
2486 
2487 /*
2488   Properties of inuse chunks
2489 */
2490 
2491 #if __STD_C
2492 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2493 #else
2494 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2495 #endif
2496 {
2497   mchunkptr next;
2498 
2499   do_check_chunk(av, p);
2500 
2501   if (chunk_is_mmapped(p))
2502     return; /* mmapped chunks have no next/prev */
2503 
2504   /* Check whether it claims to be in use ... */
2505   assert(inuse(p));
2506 
2507   next = next_chunk(p);
2508 
2509   /* ... and is surrounded by OK chunks.
2510     Since more things can be checked with free chunks than inuse ones,
2511     if an inuse chunk borders them and debug is on, it's worth doing them.
2512   */
2513   if (!prev_inuse(p))  {
2514     /* Note that we cannot even look at prev unless it is not inuse */
2515     mchunkptr prv = prev_chunk(p);
2516     assert(next_chunk(prv) == p);
2517     do_check_free_chunk(av, prv);
2518   }
2519 
2520   if (next == av->top) {
2521     assert(prev_inuse(next));
2522     assert(chunksize(next) >= MINSIZE);
2523   }
2524   else if (!inuse(next))
2525     do_check_free_chunk(av, next);
2526 }
2527 
2528 /*
2529   Properties of chunks recycled from fastbins
2530 */
2531 
2532 #if __STD_C
2533 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2534 #else
2535 static void do_check_remalloced_chunk(av, p, s)
2536 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2537 #endif
2538 {
2539   INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2540 
2541   if (!chunk_is_mmapped(p)) {
2542     assert(av == arena_for_chunk(p));
2543     if (chunk_non_main_arena(p))
2544       assert(av != &main_arena);
2545     else
2546       assert(av == &main_arena);
2547   }
2548 
2549   do_check_inuse_chunk(av, p);
2550 
2551   /* Legal size ... */
2552   assert((sz & MALLOC_ALIGN_MASK) == 0);
2553   assert((unsigned long)(sz) >= MINSIZE);
2554   /* ... and alignment */
2555   assert(aligned_OK(chunk2mem(p)));
2556   /* chunk is less than MINSIZE more than request */
2557   assert((long)(sz) - (long)(s) >= 0);
2558   assert((long)(sz) - (long)(s + MINSIZE) < 0);
2559 }
2560 
2561 /*
2562   Properties of nonrecycled chunks at the point they are malloced
2563 */
2564 
2565 #if __STD_C
2566 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2567 #else
2568 static void do_check_malloced_chunk(av, p, s)
2569 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2570 #endif
2571 {
2572   /* same as recycled case ... */
2573   do_check_remalloced_chunk(av, p, s);
2574 
2575   /*
2576     ... plus,  must obey implementation invariant that prev_inuse is
2577     always true of any allocated chunk; i.e., that each allocated
2578     chunk borders either a previously allocated and still in-use
2579     chunk, or the base of its memory arena. This is ensured
2580     by making all allocations from the `lowest' part of any found
2581     chunk.  This does not necessarily hold however for chunks
2582     recycled via fastbins.
2583   */
2584 
2585   assert(prev_inuse(p));
2586 }
2587 
2588 
2589 /*
2590   Properties of malloc_state.
2591 
2592   This may be useful for debugging malloc, as well as detecting user
2593   programmer errors that somehow write into malloc_state.
2594 
2595   If you are extending or experimenting with this malloc, you can
2596   probably figure out how to hack this routine to print out or
2597   display chunk addresses, sizes, bins, and other instrumentation.
2598 */
2599 
2600 static void do_check_malloc_state(mstate av)
2601 {
2602   int i;
2603   mchunkptr p;
2604   mchunkptr q;
2605   mbinptr b;
2606   unsigned int binbit;
2607   int empty;
2608   unsigned int idx;
2609   INTERNAL_SIZE_T size;
2610   unsigned long total = 0;
2611   int max_fast_bin;
2612 
2613   /* internal size_t must be no wider than pointer type */
2614   assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2615 
2616   /* alignment is a power of 2 */
2617   assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2618 
2619   /* cannot run remaining checks until fully initialized */
2620   if (av->top == 0 || av->top == initial_top(av))
2621     return;
2622 
2623   /* pagesize is a power of 2 */
2624   assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2625 
2626   /* A contiguous main_arena is consistent with sbrk_base.  */
2627   if (av == &main_arena && contiguous(av))
2628     assert((char*)mp_.sbrk_base + av->system_mem ==
2629 	   (char*)av->top + chunksize(av->top));
2630 
2631   /* properties of fastbins */
2632 
2633   /* max_fast is in allowed range */
2634   assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
2635 
2636   max_fast_bin = fastbin_index(av->max_fast);
2637 
2638   for (i = 0; i < NFASTBINS; ++i) {
2639     p = av->fastbins[i];
2640 
2641     /* all bins past max_fast are empty */
2642     if (i > max_fast_bin)
2643       assert(p == 0);
2644 
2645     while (p != 0) {
2646       /* each chunk claims to be inuse */
2647       do_check_inuse_chunk(av, p);
2648       total += chunksize(p);
2649       /* chunk belongs in this bin */
2650       assert(fastbin_index(chunksize(p)) == i);
2651       p = p->fd;
2652     }
2653   }
2654 
2655   if (total != 0)
2656     assert(have_fastchunks(av));
2657   else if (!have_fastchunks(av))
2658     assert(total == 0);
2659 
2660   /* check normal bins */
2661   for (i = 1; i < NBINS; ++i) {
2662     b = bin_at(av,i);
2663 
2664     /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2665     if (i >= 2) {
2666       binbit = get_binmap(av,i);
2667       empty = last(b) == b;
2668       if (!binbit)
2669         assert(empty);
2670       else if (!empty)
2671         assert(binbit);
2672     }
2673 
2674     for (p = last(b); p != b; p = p->bk) {
2675       /* each chunk claims to be free */
2676       do_check_free_chunk(av, p);
2677       size = chunksize(p);
2678       total += size;
2679       if (i >= 2) {
2680         /* chunk belongs in bin */
2681         idx = bin_index(size);
2682         assert(idx == i);
2683         /* lists are sorted */
2684         if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
2685 	  assert(p->bk == b ||
2686 		 (unsigned long)chunksize(p->bk) >=
2687 		 (unsigned long)chunksize(p));
2688 	}
2689       }
2690       /* chunk is followed by a legal chain of inuse chunks */
2691       for (q = next_chunk(p);
2692            (q != av->top && inuse(q) &&
2693              (unsigned long)(chunksize(q)) >= MINSIZE);
2694            q = next_chunk(q))
2695         do_check_inuse_chunk(av, q);
2696     }
2697   }
2698 
2699   /* top chunk is OK */
2700   check_chunk(av, av->top);
2701 
2702   /* sanity checks for statistics */
2703 
2704 #ifdef NO_THREADS
2705   assert(total <= (unsigned long)(mp_.max_total_mem));
2706   assert(mp_.n_mmaps >= 0);
2707 #endif
2708   assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2709   assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2710 
2711   assert((unsigned long)(av->system_mem) <=
2712          (unsigned long)(av->max_system_mem));
2713 
2714   assert((unsigned long)(mp_.mmapped_mem) <=
2715          (unsigned long)(mp_.max_mmapped_mem));
2716 
2717 #ifdef NO_THREADS
2718   assert((unsigned long)(mp_.max_total_mem) >=
2719          (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2720 #endif
2721 }
2722 #endif
2723 
2724 
2725 /* ----------------- Support for debugging hooks -------------------- */
2726 #include "hooks.c"
2727 
2728 
2729 /* ----------- Routines dealing with system allocation -------------- */
2730 
2731 /*
2732   sysmalloc handles malloc cases requiring more memory from the system.
2733   On entry, it is assumed that av->top does not have enough
2734   space to service request for nb bytes, thus requiring that av->top
2735   be extended or replaced.
2736 */
2737 
2738 #if __STD_C
2739 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2740 #else
2741 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2742 #endif
2743 {
2744   mchunkptr       old_top;        /* incoming value of av->top */
2745   INTERNAL_SIZE_T old_size;       /* its size */
2746   char*           old_end;        /* its end address */
2747 
2748   long            size;           /* arg to first MORECORE or mmap call */
2749   char*           brk;            /* return value from MORECORE */
2750 
2751   long            correction;     /* arg to 2nd MORECORE call */
2752   char*           snd_brk;        /* 2nd return val */
2753 
2754   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2755   INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
2756   char*           aligned_brk;    /* aligned offset into brk */
2757 
2758   mchunkptr       p;              /* the allocated/returned chunk */
2759   mchunkptr       remainder;      /* remainder from allocation */
2760   unsigned long   remainder_size; /* its size */
2761 
2762   unsigned long   sum;            /* for updating stats */
2763 
2764   size_t          pagemask  = mp_.pagesize - 1;
2765 
2766 
2767 #if HAVE_MMAP
2768 
2769   /*
2770     If have mmap, and the request size meets the mmap threshold, and
2771     the system supports mmap, and there are few enough currently
2772     allocated mmapped regions, try to directly map this request
2773     rather than expanding top.
2774   */
2775 
2776   if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2777       (mp_.n_mmaps < mp_.n_mmaps_max)) {
2778 
2779     char* mm;             /* return value from mmap call*/
2780 
2781     /*
2782       Round up size to nearest page.  For mmapped chunks, the overhead
2783       is one SIZE_SZ unit larger than for normal chunks, because there
2784       is no following chunk whose prev_size field could be used.
2785     */
2786     size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
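
    /*
      Worked example (assuming 4-byte SIZE_SZ, MALLOC_ALIGN_MASK == 7
      and 4096-byte pages): nb == 200000 yields
      (200000 + 4 + 7 + 4095) & ~4095 == 200704, i.e. 49 whole pages.
    */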
2787 
2788     /* Don't try if size wraps around 0 */
2789     if ((unsigned long)(size) > (unsigned long)(nb)) {
2790 
2791       mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2792 
2793       if (mm != MAP_FAILED) {
2794 
2795         /*
2796           The offset to the start of the mmapped region is stored
2797           in the prev_size field of the chunk. This allows us to adjust
2798           returned start address to meet alignment requirements here
2799           and in memalign(), and still be able to compute proper
2800           address argument for later munmap in free() and realloc().
2801         */
2802 
2803         front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2804         if (front_misalign > 0) {
2805           correction = MALLOC_ALIGNMENT - front_misalign;
2806           p = (mchunkptr)(mm + correction);
2807           p->prev_size = correction;
2808           set_head(p, (size - correction) |IS_MMAPPED);
2809         }
2810         else {
2811           p = (mchunkptr)mm;
2812           set_head(p, size|IS_MMAPPED);
2813         }
2814 
2815         /* update statistics */
2816 
2817         if (++mp_.n_mmaps > mp_.max_n_mmaps)
2818           mp_.max_n_mmaps = mp_.n_mmaps;
2819 
2820         sum = mp_.mmapped_mem += size;
2821         if (sum > (unsigned long)(mp_.max_mmapped_mem))
2822           mp_.max_mmapped_mem = sum;
2823 #ifdef NO_THREADS
2824         sum += av->system_mem;
2825         if (sum > (unsigned long)(mp_.max_total_mem))
2826           mp_.max_total_mem = sum;
2827 #endif
2828 
2829         check_chunk(av, p);
2830 
2831         return chunk2mem(p);
2832       }
2833     }
2834   }
2835 #endif
2836 
2837   /* Record incoming configuration of top */
2838 
2839   old_top  = av->top;
2840   old_size = chunksize(old_top);
2841   old_end  = (char*)(chunk_at_offset(old_top, old_size));
2842 
2843   brk = snd_brk = (char*)(MORECORE_FAILURE);
2844 
2845   /*
2846      If not the first time through, we require old_size to be
2847      at least MINSIZE and to have prev_inuse set.
2848   */
2849 
2850   assert((old_top == initial_top(av) && old_size == 0) ||
2851          ((unsigned long) (old_size) >= MINSIZE &&
2852           prev_inuse(old_top) &&
2853 	  ((unsigned long)old_end & pagemask) == 0));
2854 
2855   /* Precondition: not enough current space to satisfy nb request */
2856   assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2857 
2858   /* Precondition: all fastbins are consolidated */
2859   assert(!have_fastchunks(av));
2860 
2861 
2862   if (av != &main_arena) {
2863 
2864     heap_info *old_heap, *heap;
2865     size_t old_heap_size;
2866 
2867     /* First try to extend the current heap. */
2868     old_heap = heap_for_ptr(old_top);
2869     old_heap_size = old_heap->size;
2870     if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2871       av->system_mem += old_heap->size - old_heap_size;
2872       arena_mem += old_heap->size - old_heap_size;
2873 #if 0
2874       if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2875         max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2876 #endif
2877       set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2878 	       | PREV_INUSE);
2879     }
2880     else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2881       /* Use a newly allocated heap.  */
2882       heap->ar_ptr = av;
2883       heap->prev = old_heap;
2884       av->system_mem += heap->size;
2885       arena_mem += heap->size;
2886 #if 0
2887       if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2888 	max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2889 #endif
2890       /* Set up the new top.  */
2891       top(av) = chunk_at_offset(heap, sizeof(*heap));
2892       set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2893 
2894       /* Setup fencepost and free the old top chunk. */
2895       /* The fencepost takes at least MINSIZE bytes, because it might
2896 	 become the top chunk again later.  Note that a footer is set
2897 	 up, too, although the chunk is marked in use. */
2898       old_size -= MINSIZE;
2899       set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2900       if (old_size >= MINSIZE) {
2901 	set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2902 	set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2903 	set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2904 	_int_free(av, chunk2mem(old_top));
2905       } else {
2906 	set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2907 	set_foot(old_top, (old_size + 2*SIZE_SZ));
2908       }
2909     }
2910 
2911   } else { /* av == main_arena */
2912 
2913 
2914   /* Request enough space for nb + pad + overhead */
2915 
2916   size = nb + mp_.top_pad + MINSIZE;
2917 
2918   /*
2919     If contiguous, we can subtract out existing space that we hope to
2920     combine with new space. We add it back later only if
2921     we don't actually get contiguous space.
2922   */
2923 
2924   if (contiguous(av))
2925     size -= old_size;
2926 
2927   /*
2928     Round to a multiple of page size.
2929     If MORECORE is not contiguous, this ensures that we only call it
2930     with whole-page arguments.  And if MORECORE is contiguous and
2931     this is not first time through, this preserves page-alignment of
2932     previous calls. Otherwise, we correct to page-align below.
2933   */
2934 
2935   size = (size + pagemask) & ~pagemask;
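  /*
    For example, assuming a 4096-byte page (pagemask == 0xfff), a
    computed size of 135000 becomes (135000 + 4095) & ~4095 == 135168,
    i.e. exactly 33 pages, so MORECORE below is only ever asked for
    whole-page amounts.
  */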
2936 
2937   /*
2938     Don't try to call MORECORE if argument is so big as to appear
2939     negative. Note that since mmap takes size_t arg, it may succeed
2940     below even if we cannot call MORECORE.
2941   */
2942 
2943   if (size > 0)
2944     brk = (char*)(MORECORE(size));
2945 
2946   if (brk != (char*)(MORECORE_FAILURE)) {
2947     /* Call the `morecore' hook if necessary.  */
2948     if (__after_morecore_hook)
2949       (*__after_morecore_hook) ();
2950   } else {
2951   /*
2952     If have mmap, try using it as a backup when MORECORE fails or
2953     cannot be used. This is worth doing on systems that have "holes" in
2954     address space, so sbrk cannot extend to give contiguous space, but
2955     space is available elsewhere.  Note that we ignore mmap max count
2956     and threshold limits, since the space will not be used as a
2957     segregated mmap region.
2958   */
2959 
2960 #if HAVE_MMAP
2961     /* Cannot merge with old top, so add its size back in */
2962     if (contiguous(av))
2963       size = (size + old_size + pagemask) & ~pagemask;
2964 
2965     /* If we are relying on mmap as backup, then use larger units */
2966     if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2967       size = MMAP_AS_MORECORE_SIZE;
2968 
2969     /* Don't try if size wraps around 0 */
2970     if ((unsigned long)(size) > (unsigned long)(nb)) {
2971 
2972       char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2973 
2974       if (mbrk != MAP_FAILED) {
2975 
2976         /* We do not need, and cannot use, another sbrk call to find end */
2977         brk = mbrk;
2978         snd_brk = brk + size;
2979 
2980         /*
2981            Record that we no longer have a contiguous sbrk region.
2982            After the first time mmap is used as backup, we do not
2983            ever rely on contiguous space since this could incorrectly
2984            bridge regions.
2985         */
2986         set_noncontiguous(av);
2987       }
2988     }
2989 #endif
2990   }
2991 
2992   if (brk != (char*)(MORECORE_FAILURE)) {
2993     if (mp_.sbrk_base == 0)
2994       mp_.sbrk_base = brk;
2995     av->system_mem += size;
2996 
2997     /*
2998       If MORECORE extends previous space, we can likewise extend top size.
2999     */
3000 
3001     if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3002       set_head(old_top, (size + old_size) | PREV_INUSE);
3003 
3004     else if (contiguous(av) && old_size && brk < old_end) {
3005       /* Oops!  Someone else killed our space..  Can't touch anything.  */
3006       assert(0);
3007     }
3008 
3009     /*
3010       Otherwise, make adjustments:
3011 
3012       * If the first time through or noncontiguous, we need to call sbrk
3013         just to find out where the end of memory lies.
3014 
3015       * We need to ensure that all returned chunks from malloc will meet
3016         MALLOC_ALIGNMENT
3017 
3018       * If there was an intervening foreign sbrk, we need to adjust sbrk
3019         request size to account for fact that we will not be able to
3020         combine new space with existing space in old_top.
3021 
3022       * Almost all systems internally allocate whole pages at a time, in
3023         which case we might as well use the whole last page of request.
3024         So we allocate enough more memory to hit a page boundary now,
3025         which in turn causes future contiguous calls to page-align.
3026     */
3027 
3028     else {
3029       front_misalign = 0;
3030       end_misalign = 0;
3031       correction = 0;
3032       aligned_brk = brk;
3033 
3034       /* handle contiguous cases */
3035       if (contiguous(av)) {
3036 
3037 	/* Count foreign sbrk as system_mem.  */
3038 	if (old_size)
3039 	  av->system_mem += brk - old_end;
3040 
3041         /* Guarantee alignment of first new chunk made from this space */
3042 
3043         front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3044         if (front_misalign > 0) {
3045 
3046           /*
3047             Skip over some bytes to arrive at an aligned position.
3048             We don't need to specially mark these wasted front bytes.
3049             They will never be accessed anyway because
3050             prev_inuse of av->top (and any chunk created from its start)
3051             is always true after initialization.
3052           */
3053 
3054           correction = MALLOC_ALIGNMENT - front_misalign;
3055           aligned_brk += correction;
3056         }
3057 
3058         /*
3059           If this isn't adjacent to existing space, then we will not
3060           be able to merge with old_top space, so must add to 2nd request.
3061         */
3062 
3063         correction += old_size;
3064 
3065         /* Extend the end address to hit a page boundary */
3066         end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3067         correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
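        /*
          As a rough illustration, if brk + size + correction ends at
          address 0x1234567 and the page size is 4096, the line above adds
          0x1235000 - 0x1234567 == 0xa99 bytes to correction, so the second
          MORECORE request leaves the break page-aligned again.
        */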
3068 
3069         assert(correction >= 0);
3070         snd_brk = (char*)(MORECORE(correction));
3071 
3072         /*
3073           If we can't allocate the correction, try at least to find out the
3074           current brk.  It might be enough to proceed without failing.
3075 
3076           Note that if second sbrk did NOT fail, we assume that space
3077           is contiguous with first sbrk. This is a safe assumption unless
3078           program is multithreaded but doesn't use locks and a foreign sbrk
3079           occurred between our first and second calls.
3080         */
3081 
3082         if (snd_brk == (char*)(MORECORE_FAILURE)) {
3083           correction = 0;
3084           snd_brk = (char*)(MORECORE(0));
3085         } else
3086 	  /* Call the `morecore' hook if necessary.  */
3087 	  if (__after_morecore_hook)
3088 	    (*__after_morecore_hook) ();
3089       }
3090 
3091       /* handle non-contiguous cases */
3092       else {
3093         /* MORECORE/mmap must correctly align */
3094         assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3095 
3096         /* Find out current end of memory */
3097         if (snd_brk == (char*)(MORECORE_FAILURE)) {
3098           snd_brk = (char*)(MORECORE(0));
3099         }
3100       }
3101 
3102       /* Adjust top based on results of second sbrk */
3103       if (snd_brk != (char*)(MORECORE_FAILURE)) {
3104         av->top = (mchunkptr)aligned_brk;
3105         set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3106         av->system_mem += correction;
3107 
3108         /*
3109           If not the first time through, we either have a
3110           gap due to foreign sbrk or a non-contiguous region.  Insert a
3111           double fencepost at old_top to prevent consolidation with space
3112           we don't own. These fenceposts are artificial chunks that are
3113           marked as inuse and are in any case too small to use.  We need
3114           two to make sizes and alignments work out.
3115         */
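        /*
          Roughly, the resulting layout at the end of the old region is:

              old_top ... usable space ... | fencepost 1 | fencepost 2 |
                                             (2*SIZE_SZ)   (2*SIZE_SZ)

          with both fencepost headers sized 2*SIZE_SZ and marked
          PREV_INUSE, so free() never tries to coalesce across the gap
          that follows old_top.
        */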
3116 
3117         if (old_size != 0) {
3118           /*
3119              Shrink old_top to insert fenceposts, keeping size a
3120              multiple of MALLOC_ALIGNMENT. We know there is at least
3121              enough space in old_top to do this.
3122           */
3123           old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3124           set_head(old_top, old_size | PREV_INUSE);
3125 
3126           /*
3127             Note that the following assignments completely overwrite
3128             old_top when old_size was previously MINSIZE.  This is
3129             intentional. We need the fencepost, even if old_top otherwise gets
3130             lost.
3131           */
3132           chunk_at_offset(old_top, old_size            )->size =
3133             (2*SIZE_SZ)|PREV_INUSE;
3134 
3135           chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3136             (2*SIZE_SZ)|PREV_INUSE;
3137 
3138           /* If possible, release the rest. */
3139           if (old_size >= MINSIZE) {
3140             _int_free(av, chunk2mem(old_top));
3141           }
3142 
3143         }
3144       }
3145     }
3146 
3147     /* Update statistics */
3148 #ifdef NO_THREADS
3149     sum = av->system_mem + mp_.mmapped_mem;
3150     if (sum > (unsigned long)(mp_.max_total_mem))
3151       mp_.max_total_mem = sum;
3152 #endif
3153 
3154   }
3155 
3156   } /* if (av !=  &main_arena) */
3157 
3158   if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3159     av->max_system_mem = av->system_mem;
3160   check_malloc_state(av);
3161 
3162   /* finally, do the allocation */
3163   p = av->top;
3164   size = chunksize(p);
3165 
3166   /* check that one of the above allocation paths succeeded */
3167   if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3168     remainder_size = size - nb;
3169     remainder = chunk_at_offset(p, nb);
3170     av->top = remainder;
3171     set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3172     set_head(remainder, remainder_size | PREV_INUSE);
3173     check_malloced_chunk(av, p, nb);
3174     return chunk2mem(p);
3175   }
3176 
3177   /* catch all failure paths */
3178   MALLOC_FAILURE_ACTION;
3179   return 0;
3180 }
3181 
3182 
3183 /*
3184   sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
3185   to the system (via negative arguments to sbrk) if there is unused
3186   memory at the `high' end of the malloc pool. It is called
3187   automatically by free() when top space exceeds the trim
3188   threshold. It is also called by the public malloc_trim routine.  It
3189   returns 1 if it actually released any memory, else 0.
3190 */
3191 
3192 #if __STD_C
3193 static int sYSTRIm(size_t pad, mstate av)
3194 #else
3195 static int sYSTRIm(pad, av) size_t pad; mstate av;
3196 #endif
3197 {
3198   long  top_size;        /* Amount of top-most memory */
3199   long  extra;           /* Amount to release */
3200   long  released;        /* Amount actually released */
3201   char* current_brk;     /* address returned by pre-check sbrk call */
3202   char* new_brk;         /* address returned by post-check sbrk call */
3203   size_t pagesz;
3204 
3205   pagesz = mp_.pagesize;
3206   top_size = chunksize(av->top);
3207 
3208   /* Release in pagesize units, keeping at least one page */
3209   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
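  /*
    For example, with pagesz == 4096, pad == 0 and MINSIZE == 16 (typical
    for 32-bit configurations), a top chunk of 10 pages (top_size == 40960)
    gives extra == ((40960 - 16 + 4095) / 4096 - 1) * 4096 == 9 * 4096
    == 36864, leaving one full page in top after trimming.
  */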
3210 
3211   if (extra > 0) {
3212 
3213     /*
3214       Only proceed if end of memory is where we last set it.
3215       This avoids problems if there were foreign sbrk calls.
3216     */
3217     current_brk = (char*)(MORECORE(0));
3218     if (current_brk == (char*)(av->top) + top_size) {
3219 
3220       /*
3221         Attempt to release memory. We ignore MORECORE return value,
3222         and instead call again to find out where new end of memory is.
3223         This avoids problems if first call releases less than we asked,
3224         or if failure somehow altered brk value. (We could still
3225         encounter problems if it altered brk in some very bad way,
3226         but the only thing we can do is adjust anyway, which will cause
3227         some downstream failure.)
3228       */
3229 
3230       MORECORE(-extra);
3231       /* Call the `morecore' hook if necessary.  */
3232       if (__after_morecore_hook)
3233 	(*__after_morecore_hook) ();
3234       new_brk = (char*)(MORECORE(0));
3235 
3236       if (new_brk != (char*)MORECORE_FAILURE) {
3237         released = (long)(current_brk - new_brk);
3238 
3239         if (released != 0) {
3240           /* Success. Adjust top. */
3241           av->system_mem -= released;
3242           set_head(av->top, (top_size - released) | PREV_INUSE);
3243           check_malloc_state(av);
3244           return 1;
3245         }
3246       }
3247     }
3248   }
3249   return 0;
3250 }
3251 
3252 #ifdef HAVE_MMAP
3253 
3254 static void
3255 internal_function
3256 #if __STD_C
3257 munmap_chunk(mchunkptr p)
3258 #else
3259 munmap_chunk(p) mchunkptr p;
3260 #endif
3261 {
3262   INTERNAL_SIZE_T size = chunksize(p);
3263   int ret;
3264 
3265   assert (chunk_is_mmapped(p));
3266 #if 0
3267   assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3268   assert((mp_.n_mmaps > 0));
3269 #endif
3270   assert(((p->prev_size + size) & (mp_.pagesize-1)) == 0);
3271 
3272   mp_.n_mmaps--;
3273   mp_.mmapped_mem -= (size + p->prev_size);
3274 
3275   ret = munmap((char *)p - p->prev_size, size + p->prev_size);
3276 
3277   /* munmap returns non-zero on failure */
3278   assert(ret == 0);
3279 }
3280 
3281 #if HAVE_MREMAP
3282 
3283 static mchunkptr
3284 internal_function
3285 #if __STD_C
3286 mremap_chunk(mchunkptr p, size_t new_size)
3287 #else
3288 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3289 #endif
3290 {
3291   size_t page_mask = mp_.pagesize - 1;
3292   INTERNAL_SIZE_T offset = p->prev_size;
3293   INTERNAL_SIZE_T size = chunksize(p);
3294   char *cp;
3295 
3296   assert (chunk_is_mmapped(p));
3297 #if 0
3298   assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3299   assert((mp_.n_mmaps > 0));
3300 #endif
3301   assert(((size + offset) & (mp_.pagesize-1)) == 0);
3302 
3303   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3304   new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3305 
3306   cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3307                       MREMAP_MAYMOVE);
3308 
3309   if (cp == MAP_FAILED) return 0;
3310 
3311   p = (mchunkptr)(cp + offset);
3312 
3313   assert(aligned_OK(chunk2mem(p)));
3314 
3315   assert((p->prev_size == offset));
3316   set_head(p, (new_size - offset)|IS_MMAPPED);
3317 
3318   mp_.mmapped_mem -= size + offset;
3319   mp_.mmapped_mem += new_size;
3320   if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3321     mp_.max_mmapped_mem = mp_.mmapped_mem;
3322 #ifdef NO_THREADS
3323   if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3324       mp_.max_total_mem)
3325     mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3326 #endif
3327   return p;
3328 }
3329 
3330 #endif /* HAVE_MREMAP */
3331 
3332 #endif /* HAVE_MMAP */
3333 
3334 /*------------------------ Public wrappers. --------------------------------*/
3335 
3336 Void_t*
3337 public_mALLOc(size_t bytes)
3338 {
3339   mstate ar_ptr;
3340   Void_t *victim;
3341 
3342   __malloc_ptr_t (*hook) __MALLOC_P ((size_t, __const __malloc_ptr_t)) =
3343     __malloc_hook;
3344   if (hook != NULL)
3345     return (*hook)(bytes, RETURN_ADDRESS (0));
3346 
3347   arena_get(ar_ptr, bytes);
3348   if(!ar_ptr)
3349     return 0;
3350   victim = _int_malloc(ar_ptr, bytes);
3351   if(!victim) {
3352     /* Maybe the failure is due to running out of mmapped areas. */
3353     if(ar_ptr != &main_arena) {
3354       (void)mutex_unlock(&ar_ptr->mutex);
3355       (void)mutex_lock(&main_arena.mutex);
3356       victim = _int_malloc(&main_arena, bytes);
3357       (void)mutex_unlock(&main_arena.mutex);
3358     } else {
3359 #if USE_ARENAS
3360       /* ... or sbrk() has failed and there is still a chance to mmap() */
3361       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3362       (void)mutex_unlock(&main_arena.mutex);
3363       if(ar_ptr) {
3364         victim = _int_malloc(ar_ptr, bytes);
3365         (void)mutex_unlock(&ar_ptr->mutex);
3366       }
3367 #endif
3368     }
3369   } else
3370     (void)mutex_unlock(&ar_ptr->mutex);
3371   assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3372 	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3373   return victim;
3374 }
3375 #ifdef libc_hidden_def
3376 libc_hidden_def(public_mALLOc)
3377 #endif
3378 
3379 void
3380 public_fREe(Void_t* mem)
3381 {
3382   mstate ar_ptr;
3383   mchunkptr p;                          /* chunk corresponding to mem */
3384 
3385   void (*hook) __MALLOC_P ((__malloc_ptr_t, __const __malloc_ptr_t)) =
3386     __free_hook;
3387   if (hook != NULL) {
3388     (*hook)(mem, RETURN_ADDRESS (0));
3389     return;
3390   }
3391 
3392   if (mem == 0)                              /* free(0) has no effect */
3393     return;
3394 
3395   p = mem2chunk(mem);
3396 
3397 #if HAVE_MMAP
3398   if (chunk_is_mmapped(p))                       /* release mmapped memory. */
3399   {
3400     munmap_chunk(p);
3401     return;
3402   }
3403 #endif
3404 
3405   ar_ptr = arena_for_chunk(p);
3406 #if THREAD_STATS
3407   if(!mutex_trylock(&ar_ptr->mutex))
3408     ++(ar_ptr->stat_lock_direct);
3409   else {
3410     (void)mutex_lock(&ar_ptr->mutex);
3411     ++(ar_ptr->stat_lock_wait);
3412   }
3413 #else
3414   (void)mutex_lock(&ar_ptr->mutex);
3415 #endif
3416   _int_free(ar_ptr, mem);
3417   (void)mutex_unlock(&ar_ptr->mutex);
3418 }
3419 #ifdef libc_hidden_def
3420 libc_hidden_def (public_fREe)
3421 #endif
3422 
3423 Void_t*
3424 public_rEALLOc(Void_t* oldmem, size_t bytes)
3425 {
3426   mstate ar_ptr;
3427   INTERNAL_SIZE_T    nb;      /* padded request size */
3428 
3429   mchunkptr oldp;             /* chunk corresponding to oldmem */
3430   INTERNAL_SIZE_T    oldsize; /* its size */
3431 
3432   Void_t* newp;             /* chunk to return */
3433 
3434   __malloc_ptr_t (*hook) __MALLOC_P ((__malloc_ptr_t, size_t,
3435 				      __const __malloc_ptr_t)) =
3436     __realloc_hook;
3437   if (hook != NULL)
3438     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3439 
3440 #if REALLOC_ZERO_BYTES_FREES
3441   if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3442 #endif
3443 
3444   /* realloc of null is supposed to be same as malloc */
3445   if (oldmem == 0) return public_mALLOc(bytes);
3446 
3447   oldp    = mem2chunk(oldmem);
3448   oldsize = chunksize(oldp);
3449 
3450   checked_request2size(bytes, nb);
3451 
3452 #if HAVE_MMAP
3453   if (chunk_is_mmapped(oldp))
3454   {
3455     Void_t* newmem;
3456 
3457 #if HAVE_MREMAP
3458     newp = mremap_chunk(oldp, nb);
3459     if(newp) return chunk2mem(newp);
3460 #endif
3461     /* Note the extra SIZE_SZ overhead. */
3462     if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3463     /* Must alloc, copy, free. */
3464     newmem = public_mALLOc(bytes);
3465     if (newmem == 0) return 0; /* propagate failure */
3466     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3467     munmap_chunk(oldp);
3468     return newmem;
3469   }
3470 #endif
3471 
3472   ar_ptr = arena_for_chunk(oldp);
3473 #if THREAD_STATS
3474   if(!mutex_trylock(&ar_ptr->mutex))
3475     ++(ar_ptr->stat_lock_direct);
3476   else {
3477     (void)mutex_lock(&ar_ptr->mutex);
3478     ++(ar_ptr->stat_lock_wait);
3479   }
3480 #else
3481   (void)mutex_lock(&ar_ptr->mutex);
3482 #endif
3483 
3484 #ifndef NO_THREADS
3485   /* As in malloc(), remember this arena for the next allocation. */
3486   tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3487 #endif
3488 
3489   newp = _int_realloc(ar_ptr, oldmem, bytes);
3490 
3491   (void)mutex_unlock(&ar_ptr->mutex);
3492   assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3493 	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3494   return newp;
3495 }
3496 #ifdef libc_hidden_def
3497 libc_hidden_def (public_rEALLOc)
3498 #endif
3499 
3500 Void_t*
3501 public_mEMALIGn(size_t alignment, size_t bytes)
3502 {
3503   mstate ar_ptr;
3504   Void_t *p;
3505 
3506   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3507 					__const __malloc_ptr_t)) =
3508     __memalign_hook;
3509   if (hook != NULL)
3510     return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3511 
3512   /* If need less alignment than we give anyway, just relay to malloc */
3513   if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3514 
3515   /* Otherwise, ensure that it is at least a minimum chunk size */
3516   if (alignment <  MINSIZE) alignment = MINSIZE;
3517 
3518   arena_get(ar_ptr, bytes + alignment + MINSIZE);
3519   if(!ar_ptr)
3520     return 0;
3521   p = _int_memalign(ar_ptr, alignment, bytes);
3522   (void)mutex_unlock(&ar_ptr->mutex);
3523   if(!p) {
3524     /* Maybe the failure is due to running out of mmapped areas. */
3525     if(ar_ptr != &main_arena) {
3526       (void)mutex_lock(&main_arena.mutex);
3527       p = _int_memalign(&main_arena, alignment, bytes);
3528       (void)mutex_unlock(&main_arena.mutex);
3529     } else {
3530 #if USE_ARENAS
3531       /* ... or sbrk() has failed and there is still a chance to mmap() */
3532       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3533       if(ar_ptr) {
3534         p = _int_memalign(ar_ptr, alignment, bytes);
3535         (void)mutex_unlock(&ar_ptr->mutex);
3536       }
3537 #endif
3538     }
3539   }
3540   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3541 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
3542   return p;
3543 }
3544 #ifdef libc_hidden_def
3545 libc_hidden_def (public_mEMALIGn)
3546 #endif
3547 
3548 Void_t*
3549 public_vALLOc(size_t bytes)
3550 {
3551   mstate ar_ptr;
3552   Void_t *p;
3553 
3554   if(__malloc_initialized < 0)
3555     ptmalloc_init ();
3556   arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3557   if(!ar_ptr)
3558     return 0;
3559   p = _int_valloc(ar_ptr, bytes);
3560   (void)mutex_unlock(&ar_ptr->mutex);
3561   return p;
3562 }
3563 
3564 Void_t*
3565 public_pVALLOc(size_t bytes)
3566 {
3567   mstate ar_ptr;
3568   Void_t *p;
3569 
3570   if(__malloc_initialized < 0)
3571     ptmalloc_init ();
3572   arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3573   p = _int_pvalloc(ar_ptr, bytes);
3574   (void)mutex_unlock(&ar_ptr->mutex);
3575   return p;
3576 }
3577 
3578 Void_t*
3579 public_cALLOc(size_t n, size_t elem_size)
3580 {
3581   mstate av;
3582   mchunkptr oldtop, p;
3583   INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3584   Void_t* mem;
3585   unsigned long clearsize;
3586   unsigned long nclears;
3587   INTERNAL_SIZE_T* d;
3588   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3589     __malloc_hook;
3590 
3591   /* size_t is unsigned so the behavior on overflow is defined.  */
3592   bytes = n * elem_size;
3593 #define HALF_INTERNAL_SIZE_T \
3594   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3595   if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3596     if (elem_size != 0 && bytes / elem_size != n) {
3597       MALLOC_FAILURE_ACTION;
3598       return 0;
3599     }
3600   }
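  /*
    The cheap (n | elem_size) test skips the division in the common case:
    if both operands fit in the lower half of INTERNAL_SIZE_T, their
    product cannot wrap.  For instance, with a 32-bit INTERNAL_SIZE_T,
    HALF_INTERNAL_SIZE_T is 65536, so a call like calloc(70000, 70000)
    takes the bytes / elem_size != n path and fails cleanly instead of
    returning a too-small block.
  */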
3601 
3602   if (hook != NULL) {
3603     sz = bytes;
3604     mem = (*hook)(sz, RETURN_ADDRESS (0));
3605     if(mem == 0)
3606       return 0;
3607 #ifdef HAVE_MEMCPY
3608     return memset(mem, 0, sz);
3609 #else
3610     while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3611     return mem;
3612 #endif
3613   }
3614 
3615   sz = bytes;
3616 
3617   arena_get(av, sz);
3618   if(!av)
3619     return 0;
3620 
3621   /* Check if we hand out the top chunk, in which case there may be no
3622      need to clear. */
3623 #if MORECORE_CLEARS
3624   oldtop = top(av);
3625   oldtopsize = chunksize(top(av));
3626 #if MORECORE_CLEARS < 2
3627   /* Only newly allocated memory is guaranteed to be cleared.  */
3628   if (av == &main_arena &&
3629       oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3630     oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3631 #endif
3632 #endif
3633   mem = _int_malloc(av, sz);
3634 
3635   /* Only clearing follows, so we can unlock early. */
3636   (void)mutex_unlock(&av->mutex);
3637 
3638   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3639 	 av == arena_for_chunk(mem2chunk(mem)));
3640 
3641   if (mem == 0) {
3642     /* Maybe the failure is due to running out of mmapped areas. */
3643     if(av != &main_arena) {
3644       (void)mutex_lock(&main_arena.mutex);
3645       mem = _int_malloc(&main_arena, sz);
3646       (void)mutex_unlock(&main_arena.mutex);
3647     } else {
3648 #if USE_ARENAS
3649       /* ... or sbrk() has failed and there is still a chance to mmap() */
3650       (void)mutex_lock(&main_arena.mutex);
3651       av = arena_get2(av->next ? av : 0, sz);
3652       (void)mutex_unlock(&main_arena.mutex);
3653       if(av) {
3654         mem = _int_malloc(av, sz);
3655         (void)mutex_unlock(&av->mutex);
3656       }
3657 #endif
3658     }
3659     if (mem == 0) return 0;
3660   }
3661   p = mem2chunk(mem);
3662 
3663   /* Two optional cases in which clearing is not necessary */
3664 #if HAVE_MMAP
3665   if (chunk_is_mmapped(p))
3666     return mem;
3667 #endif
3668 
3669   csz = chunksize(p);
3670 
3671 #if MORECORE_CLEARS
3672   if (p == oldtop && csz > oldtopsize) {
3673     /* clear only the bytes from non-freshly-sbrked memory */
3674     csz = oldtopsize;
3675   }
3676 #endif
3677 
3678   /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
3679      contents have an odd number of INTERNAL_SIZE_T-sized words;
3680      minimally 3.  */
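  /* Why the word count is odd and at least 3: csz is a multiple of
     MALLOC_ALIGNMENT (2*SIZE_SZ) and at least MINSIZE, so
     clearsize == csz - SIZE_SZ is an odd multiple of SIZE_SZ,
     no smaller than 3*SIZE_SZ.  */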
3681   d = (INTERNAL_SIZE_T*)mem;
3682   clearsize = csz - SIZE_SZ;
3683   nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3684   assert(nclears >= 3);
3685 
3686   if (nclears > 9)
3687     MALLOC_ZERO(d, clearsize);
3688 
3689   else {
3690     *(d+0) = 0;
3691     *(d+1) = 0;
3692     *(d+2) = 0;
3693     if (nclears > 4) {
3694       *(d+3) = 0;
3695       *(d+4) = 0;
3696       if (nclears > 6) {
3697 	*(d+5) = 0;
3698 	*(d+6) = 0;
3699 	if (nclears > 8) {
3700 	  *(d+7) = 0;
3701 	  *(d+8) = 0;
3702 	}
3703       }
3704     }
3705   }
3706 
3707   return mem;
3708 }
3709 
3710 Void_t**
3711 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3712 {
3713   mstate ar_ptr;
3714   Void_t** m;
3715 
3716   arena_get(ar_ptr, n*elem_size);
3717   if(!ar_ptr)
3718     return 0;
3719 
3720   m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3721   (void)mutex_unlock(&ar_ptr->mutex);
3722   return m;
3723 }
3724 
3725 Void_t**
3726 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3727 {
3728   mstate ar_ptr;
3729   Void_t** m;
3730 
3731   arena_get(ar_ptr, 0);
3732   if(!ar_ptr)
3733     return 0;
3734 
3735   m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3736   (void)mutex_unlock(&ar_ptr->mutex);
3737   return m;
3738 }
3739 
3740 #ifndef _LIBC
3741 
3742 void
3743 public_cFREe(Void_t* m)
3744 {
3745   public_fREe(m);
3746 }
3747 
3748 #endif /* _LIBC */
3749 
3750 int
3751 public_mTRIm(size_t s)
3752 {
3753   int result;
3754 
3755   (void)mutex_lock(&main_arena.mutex);
3756   result = mTRIm(s);
3757   (void)mutex_unlock(&main_arena.mutex);
3758   return result;
3759 }
3760 
3761 size_t
3762 public_mUSABLe(Void_t* m)
3763 {
3764   size_t result;
3765 
3766   result = mUSABLe(m);
3767   return result;
3768 }
3769 
3770 /* This exists mainly for backward compatibility.  Calling
3771    _int_get_arena_info() directly is more useful.  */
3772 struct mallinfo
3773 public_mALLINFo()
3774 {
3775   struct malloc_arena_info mai;
3776   struct mallinfo m;
3777   size_t avail;
3778 
3779   if(__malloc_initialized < 0)
3780     ptmalloc_init ();
3781   _int_get_arena_info(&main_arena, &mai);
3782   /* Account for top */
3783   avail = mai.fastavail + mai.binavail + mai.top_size;
3784   m.smblks = mai.nfastblocks;
3785   m.ordblks = mai.nbinblocks + 1;
3786   m.fordblks = avail;
3787   m.uordblks = mai.system_mem - avail;
3788   m.arena = mai.system_mem;
3789   m.hblks = mp_.n_mmaps;
3790   m.hblkhd = mp_.mmapped_mem;
3791   m.fsmblks = mai.fastavail;
3792   m.keepcost = mai.top_size;
3793   m.usmblks = mp_.max_total_mem;
3794   return m;
3795 }
3796 
3797 int
3798 public_mALLOPt(int p, int v)
3799 {
3800   int result;
3801   result = mALLOPt(p, v);
3802   return result;
3803 }
3804 
3805 /*
3806   ------------------------------ malloc ------------------------------
3807 */
3808 
3809 Void_t*
3810 _int_malloc(mstate av, size_t bytes)
3811 {
3812   INTERNAL_SIZE_T nb;               /* normalized request size */
3813   unsigned int    idx;              /* associated bin index */
3814   mbinptr         bin;              /* associated bin */
3815   mfastbinptr*    fb;               /* associated fastbin */
3816 
3817   mchunkptr       victim;           /* inspected/selected chunk */
3818   INTERNAL_SIZE_T size;             /* its size */
3819   int             victim_index;     /* its bin index */
3820 
3821   mchunkptr       remainder;        /* remainder from a split */
3822   unsigned long   remainder_size;   /* its size */
3823 
3824   unsigned int    block;            /* bit map traverser */
3825   unsigned int    bit;              /* bit map traverser */
3826   unsigned int    map;              /* current word of binmap */
3827 
3828   mchunkptr       fwd;              /* misc temp for linking */
3829   mchunkptr       bck;              /* misc temp for linking */
3830 
3831   /*
3832     Convert request size to internal form by adding SIZE_SZ bytes
3833     overhead plus possibly more to obtain necessary alignment and/or
3834     to obtain a size of at least MINSIZE, the smallest allocatable
3835     size. Also, checked_request2size traps (returning 0) request sizes
3836     that are so large that they wrap around zero when padded and
3837     aligned.
3838   */
3839 
3840   checked_request2size(bytes, nb);
3841 
3842   /*
3843     If the size qualifies as a fastbin, first check corresponding bin.
3844     This code is safe to execute even if av is not yet initialized, so we
3845     can try it without checking, which saves some time on this fast path.
3846   */
3847 
3848   if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
3849     fb = &(av->fastbins[(fastbin_index(nb))]);
3850     if ( (victim = *fb) != 0) {
3851       *fb = victim->fd;
3852       check_remalloced_chunk(av, victim, nb);
3853       return chunk2mem(victim);
3854     }
3855   }
3856 
3857   /*
3858     If a small request, check regular bin.  Since these "smallbins"
3859     hold one size each, no searching within bins is necessary.
3860     (For a large request, we need to wait until unsorted chunks are
3861     processed to find best fit. But for small ones, fits are exact
3862     anyway, so we can check now, which is faster.)
3863   */
3864 
3865   if (in_smallbin_range(nb)) {
3866     idx = smallbin_index(nb);
3867     bin = bin_at(av,idx);
3868 
3869     if ( (victim = last(bin)) != bin) {
3870       if (victim == 0) /* initialization check */
3871         malloc_consolidate(av);
3872       else {
3873         bck = victim->bk;
3874         set_inuse_bit_at_offset(victim, nb);
3875         bin->bk = bck;
3876         bck->fd = bin;
3877 
3878         if (av != &main_arena)
3879 	  victim->size |= NON_MAIN_ARENA;
3880         check_malloced_chunk(av, victim, nb);
3881         return chunk2mem(victim);
3882       }
3883     }
3884   }
3885 
3886   /*
3887      If this is a large request, consolidate fastbins before continuing.
3888      While it might look excessive to kill all fastbins before
3889      even seeing if there is space available, this avoids
3890      fragmentation problems normally associated with fastbins.
3891      Also, in practice, programs tend to have runs of either small or
3892      large requests, but less often mixtures, so consolidation is not
3893      invoked all that often in most programs. And the programs in which
3894      it is called frequently otherwise tend to fragment.
3895   */
3896 
3897   else {
3898     idx = largebin_index(nb);
3899     if (have_fastchunks(av))
3900       malloc_consolidate(av);
3901   }
3902 
3903   /*
3904     Process recently freed or remaindered chunks, taking one only if
3905     it is an exact fit, or, if this is a small request, the chunk is the remainder from
3906     the most recent non-exact fit.  Place other traversed chunks in
3907     bins.  Note that this step is the only place in any routine where
3908     chunks are placed in bins.
3909 
3910     The outer loop here is needed because we might not realize until
3911     near the end of malloc that we should have consolidated, so must
3912     do so and retry. This happens at most once, and only when we would
3913     otherwise need to expand memory to service a "small" request.
3914   */
3915 
3916   for(;;) {
3917 
3918     while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3919       bck = victim->bk;
3920       size = chunksize(victim);
3921 
3922       /*
3923          If a small request, try to use last remainder if it is the
3924          only chunk in unsorted bin.  This helps promote locality for
3925          runs of consecutive small requests. This is the only
3926          exception to best-fit, and applies only when there is
3927          no exact fit for a small chunk.
3928       */
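      /*
        For instance, a run of 32-byte requests will keep splitting the
        same last_remainder chunk, so consecutive allocations end up
        adjacent in memory rather than scattered across bins.
      */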
3929 
3930       if (in_smallbin_range(nb) &&
3931           bck == unsorted_chunks(av) &&
3932           victim == av->last_remainder &&
3933           (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3934 
3935         /* split and reattach remainder */
3936         remainder_size = size - nb;
3937         remainder = chunk_at_offset(victim, nb);
3938         unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3939         av->last_remainder = remainder;
3940         remainder->bk = remainder->fd = unsorted_chunks(av);
3941 
3942         set_head(victim, nb | PREV_INUSE |
3943 		 (av != &main_arena ? NON_MAIN_ARENA : 0));
3944         set_head(remainder, remainder_size | PREV_INUSE);
3945         set_foot(remainder, remainder_size);
3946 
3947         check_malloced_chunk(av, victim, nb);
3948         return chunk2mem(victim);
3949       }
3950 
3951       /* remove from unsorted list */
3952       unsorted_chunks(av)->bk = bck;
3953       bck->fd = unsorted_chunks(av);
3954 
3955       /* Take now instead of binning if exact fit */
3956 
3957       if (size == nb) {
3958         set_inuse_bit_at_offset(victim, size);
3959 	if (av != &main_arena)
3960 	  victim->size |= NON_MAIN_ARENA;
3961         check_malloced_chunk(av, victim, nb);
3962         return chunk2mem(victim);
3963       }
3964 
3965       /* place chunk in bin */
3966 
3967       if (in_smallbin_range(size)) {
3968         victim_index = smallbin_index(size);
3969         bck = bin_at(av, victim_index);
3970         fwd = bck->fd;
3971       }
3972       else {
3973         victim_index = largebin_index(size);
3974         bck = bin_at(av, victim_index);
3975         fwd = bck->fd;
3976 
3977         if (fwd != bck) {
3978           /* if smaller than smallest, place first */
3979 	  assert((bck->bk->size & NON_MAIN_ARENA) == 0);
3980           if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
3981             fwd = bck;
3982             bck = bck->bk;
3983           }
3984           else if ((unsigned long)(size) >=
3985                    (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
3986 
3987             /* maintain large bins in sorted order */
3988             size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
3989 	    assert((fwd->size & NON_MAIN_ARENA) == 0);
3990             while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
3991               fwd = fwd->fd;
3992 	      assert((fwd->size & NON_MAIN_ARENA) == 0);
3993 	    }
3994             bck = fwd->bk;
3995           }
3996         }
3997       }
3998 
3999       mark_bin(av, victim_index);
4000       victim->bk = bck;
4001       victim->fd = fwd;
4002       fwd->bk = victim;
4003       bck->fd = victim;
4004     }
4005 
4006     /*
4007       If a large request, scan through the chunks of current bin in
4008       sorted order to find smallest that fits.  This is the only step
4009       where an unbounded number of chunks might be scanned without doing
4010       anything useful with them. However the lists tend to be short.
4011     */
4012 
4013     if (!in_smallbin_range(nb)) {
4014       bin = bin_at(av, idx);
4015 
4016       for (victim = last(bin); victim != bin; victim = victim->bk) {
4017 	size = chunksize(victim);
4018 
4019 	if ((unsigned long)(size) >= (unsigned long)(nb)) {
4020 	  remainder_size = size - nb;
4021 	  unlink(victim, bck, fwd);
4022 
4023 	  /* Exhaust */
4024 	  if (remainder_size < MINSIZE)  {
4025 	    set_inuse_bit_at_offset(victim, size);
4026 	    if (av != &main_arena)
4027 	      victim->size |= NON_MAIN_ARENA;
4028 	    check_malloced_chunk(av, victim, nb);
4029 	    return chunk2mem(victim);
4030 	  }
4031 	  /* Split */
4032 	  else {
4033 	    remainder = chunk_at_offset(victim, nb);
4034 	    unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4035 	    remainder->bk = remainder->fd = unsorted_chunks(av);
4036 	    set_head(victim, nb | PREV_INUSE |
4037 		     (av != &main_arena ? NON_MAIN_ARENA : 0));
4038 	    set_head(remainder, remainder_size | PREV_INUSE);
4039 	    set_foot(remainder, remainder_size);
4040 	    check_malloced_chunk(av, victim, nb);
4041 	    return chunk2mem(victim);
4042 	  }
4043 	}
4044       }
4045     }
4046 
4047     /*
4048       Search for a chunk by scanning bins, starting with next largest
4049       bin. This search is strictly by best-fit; i.e., the smallest
4050       (with ties going to approximately the least recently used) chunk
4051       that fits is selected.
4052 
4053       The bitmap avoids needing to check that most blocks are nonempty.
4054       The particular case of skipping all bins during warm-up phases
4055       when no chunks have been returned yet is faster than it might look.
4056     */
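    /*
      As an illustration, assuming the usual BINMAPSHIFT of 5 (32 bins
      per binmap word): bin index 70 lives in binmap word 70 >> 5 == 2,
      at bit 1 << (70 & 31) == 1 << 6, which is what idx2block() and
      idx2bit() compute below.
    */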
4057 
4058     ++idx;
4059     bin = bin_at(av,idx);
4060     block = idx2block(idx);
4061     map = av->binmap[block];
4062     bit = idx2bit(idx);
4063 
4064     for (;;) {
4065 
4066       /* Skip rest of block if there are no more set bits in this block.  */
4067       if (bit > map || bit == 0) {
4068         do {
4069           if (++block >= BINMAPSIZE)  /* out of bins */
4070             goto use_top;
4071         } while ( (map = av->binmap[block]) == 0);
4072 
4073         bin = bin_at(av, (block << BINMAPSHIFT));
4074         bit = 1;
4075       }
4076 
4077       /* Advance to bin with set bit. There must be one. */
4078       while ((bit & map) == 0) {
4079         bin = next_bin(bin);
4080         bit <<= 1;
4081         assert(bit != 0);
4082       }
4083 
4084       /* Inspect the bin. It is likely to be non-empty */
4085       victim = last(bin);
4086 
4087       /*  If a false alarm (empty bin), clear the bit. */
4088       if (victim == bin) {
4089         av->binmap[block] = map &= ~bit; /* Write through */
4090         bin = next_bin(bin);
4091         bit <<= 1;
4092       }
4093 
4094       else {
4095         size = chunksize(victim);
4096 
4097         /*  We know the first chunk in this bin is big enough to use. */
4098         assert((unsigned long)(size) >= (unsigned long)(nb));
4099 
4100         remainder_size = size - nb;
4101 
4102         /* unlink */
4103         bck = victim->bk;
4104         bin->bk = bck;
4105         bck->fd = bin;
4106 
4107         /* Exhaust */
4108         if (remainder_size < MINSIZE) {
4109           set_inuse_bit_at_offset(victim, size);
4110 	  if (av != &main_arena)
4111 	    victim->size |= NON_MAIN_ARENA;
4112           check_malloced_chunk(av, victim, nb);
4113           return chunk2mem(victim);
4114         }
4115 
4116         /* Split */
4117         else {
4118           remainder = chunk_at_offset(victim, nb);
4119 
4120           unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4121           remainder->bk = remainder->fd = unsorted_chunks(av);
4122           /* advertise as last remainder */
4123           if (in_smallbin_range(nb))
4124             av->last_remainder = remainder;
4125 
4126           set_head(victim, nb | PREV_INUSE |
4127 		   (av != &main_arena ? NON_MAIN_ARENA : 0));
4128           set_head(remainder, remainder_size | PREV_INUSE);
4129           set_foot(remainder, remainder_size);
4130           check_malloced_chunk(av, victim, nb);
4131           return chunk2mem(victim);
4132         }
4133       }
4134     }
4135 
4136   use_top:
4137     /*
4138       If large enough, split off the chunk bordering the end of memory
4139       (held in av->top). Note that this is in accord with the best-fit
4140       search rule.  In effect, av->top is treated as larger (and thus
4141       less well fitting) than any other available chunk since it can
4142       be extended to be as large as necessary (up to system
4143       limitations).
4144 
4145       We require that av->top always exists (i.e., has size >=
4146       MINSIZE) after initialization, so if it would otherwise be
4147       exhausted by the current request, it is replenished. (The main
4148       reason for ensuring it exists is that we may need MINSIZE space
4149       to put in fenceposts in sysmalloc.)
4150     */
4151 
4152     victim = av->top;
4153     size = chunksize(victim);
4154 
4155     if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4156       remainder_size = size - nb;
4157       remainder = chunk_at_offset(victim, nb);
4158       av->top = remainder;
4159       set_head(victim, nb | PREV_INUSE |
4160 	       (av != &main_arena ? NON_MAIN_ARENA : 0));
4161       set_head(remainder, remainder_size | PREV_INUSE);
4162 
4163       check_malloced_chunk(av, victim, nb);
4164       return chunk2mem(victim);
4165     }
4166 
4167     /*
4168       If there is space available in fastbins, consolidate and retry,
4169       to possibly avoid expanding memory. This can occur only if nb is
4170       in smallbin range so we didn't consolidate upon entry.
4171     */
4172 
4173     else if (have_fastchunks(av)) {
4174       assert(in_smallbin_range(nb));
4175       malloc_consolidate(av);
4176       idx = smallbin_index(nb); /* restore original bin index */
4177     }
4178 
4179     /*
4180        Otherwise, relay to handle system-dependent cases
4181     */
4182     else
4183       return sYSMALLOc(nb, av);
4184   }
4185 }
4186 
4187 /*
4188   ------------------------------ free ------------------------------
4189 */
4190 
4191 void
4192 _int_free(mstate av, Void_t* mem)
4193 {
4194   mchunkptr       p;           /* chunk corresponding to mem */
4195   INTERNAL_SIZE_T size;        /* its size */
4196   mfastbinptr*    fb;          /* associated fastbin */
4197   mchunkptr       nextchunk;   /* next contiguous chunk */
4198   INTERNAL_SIZE_T nextsize;    /* its size */
4199   int             nextinuse;   /* true if nextchunk is used */
4200   INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
4201   mchunkptr       bck;         /* misc temp for linking */
4202   mchunkptr       fwd;         /* misc temp for linking */
4203 
4204 
4205   /* free(0) has no effect */
4206   if (mem != 0) {
4207     p = mem2chunk(mem);
4208     size = chunksize(p);
4209 
4210     check_inuse_chunk(av, p);
4211 
4212     /*
4213       If eligible, place chunk on a fastbin so it can be found
4214       and used quickly in malloc.
4215     */
4216 
4217     if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
4218 
4219 #if TRIM_FASTBINS
4220         /*
4221            If TRIM_FASTBINS is set, don't place chunks
4222            bordering top into fastbins
4223         */
4224         && (chunk_at_offset(p, size) != av->top)
4225 #endif
4226         ) {
4227 
4228       set_fastchunks(av);
4229       fb = &(av->fastbins[fastbin_index(size)]);
4230       p->fd = *fb;
4231       *fb = p;
4232     }
4233 
4234     /*
4235        Consolidate other non-mmapped chunks as they arrive.
4236     */
4237 
4238     else if (!chunk_is_mmapped(p)) {
4239       nextchunk = chunk_at_offset(p, size);
4240       nextsize = chunksize(nextchunk);
4241       assert(nextsize > 0);
4242 
4243       /* consolidate backward */
4244       if (!prev_inuse(p)) {
4245         prevsize = p->prev_size;
4246         size += prevsize;
4247         p = chunk_at_offset(p, -((long) prevsize));
4248         unlink(p, bck, fwd);
4249       }
4250 
4251       if (nextchunk != av->top) {
4252         /* get and clear inuse bit */
4253         nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4254 
4255         /* consolidate forward */
4256         if (!nextinuse) {
4257           unlink(nextchunk, bck, fwd);
4258           size += nextsize;
4259         } else
4260 	  clear_inuse_bit_at_offset(nextchunk, 0);
4261 
4262         /*
4263           Place the chunk in unsorted chunk list. Chunks are
4264           not placed into regular bins until after they have
4265           been given one chance to be used in malloc.
4266         */
4267 
4268         bck = unsorted_chunks(av);
4269         fwd = bck->fd;
4270         p->bk = bck;
4271         p->fd = fwd;
4272         bck->fd = p;
4273         fwd->bk = p;
4274 
4275         set_head(p, size | PREV_INUSE);
4276         set_foot(p, size);
4277 
4278         check_free_chunk(av, p);
4279       }
4280 
4281       /*
4282          If the chunk borders the current high end of memory,
4283          consolidate into top
4284       */
4285 
4286       else {
4287         size += nextsize;
4288         set_head(p, size | PREV_INUSE);
4289         av->top = p;
4290         check_chunk(av, p);
4291       }
4292 
4293       /*
4294         If freeing a large space, consolidate possibly-surrounding
4295         chunks. Then, if the total unused topmost memory exceeds trim
4296         threshold, ask malloc_trim to reduce top.
4297 
4298         Unless max_fast is 0, we don't know if there are fastbins
4299         bordering top, so we cannot tell for sure whether threshold
4300         has been reached unless fastbins are consolidated.  But we
4301         don't want to consolidate on each free.  As a compromise,
4302         consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4303         is reached.
4304       */
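      /*
        With the default FASTBIN_CONSOLIDATION_THRESHOLD (65536 bytes),
        freeing any chunk whose coalesced size reaches 64K triggers one
        consolidation pass and, in the main arena, a possible sYSTRIm
        back to the system.
      */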
4305 
4306       if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4307         if (have_fastchunks(av))
4308           malloc_consolidate(av);
4309 
4310 	if (av == &main_arena) {
4311 #ifndef MORECORE_CANNOT_TRIM
4312 	  if ((unsigned long)(chunksize(av->top)) >=
4313 	      (unsigned long)(mp_.trim_threshold))
4314 	    sYSTRIm(mp_.top_pad, av);
4315 #endif
4316 	} else {
4317 	  /* Always try heap_trim(), even if the top chunk is not
4318              large, because the corresponding heap might go away.  */
4319 	  heap_info *heap = heap_for_ptr(top(av));
4320 
4321 	  assert(heap->ar_ptr == av);
4322 	  heap_trim(heap, mp_.top_pad);
4323 	}
4324       }
4325 
4326     }
4327     /*
4328       If the chunk was allocated via mmap, release via munmap(). Note
4329       that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4330       user must have overwritten memory. There's nothing we can do to
4331       catch this error unless MALLOC_DEBUG is set, in which case
4332       check_inuse_chunk (above) will have triggered an error.
4333     */
4334 
4335     else {
4336 #if HAVE_MMAP
4337       int ret;
4338       INTERNAL_SIZE_T offset = p->prev_size;
4339       mp_.n_mmaps--;
4340       mp_.mmapped_mem -= (size + offset);
4341       ret = munmap((char*)p - offset, size + offset);
4342       /* munmap returns non-zero on failure */
4343       assert(ret == 0);
4344 #endif
4345     }
4346   }
4347 }
4348 
4349 /*
4350   ------------------------- malloc_consolidate -------------------------
4351 
4352   malloc_consolidate is a specialized version of free() that tears
4353   down chunks held in fastbins.  Free itself cannot be used for this
4354   purpose since, among other things, it might place chunks back onto
4355   fastbins.  So, instead, we need to use a minor variant of the same
4356   code.
4357 
4358   Also, because this routine needs to be called the first time through
4359   malloc anyway, it turns out to be the perfect place to trigger
4360   initialization code.
4361 */
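/*
  The traversal below walks every fastbin slot from fastbins[0] up to the
  bin for max_fast, popping each LIFO list in turn; each freed chunk is
  coalesced with its neighbors and parked in the unsorted bin (or merged
  into top) rather than being re-binned immediately.
*/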
4362 
4363 #if __STD_C
4364 static void malloc_consolidate(mstate av)
4365 #else
4366 static void malloc_consolidate(av) mstate av;
4367 #endif
4368 {
4369   mfastbinptr*    fb;                 /* current fastbin being consolidated */
4370   mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
4371   mchunkptr       p;                  /* current chunk being consolidated */
4372   mchunkptr       nextp;              /* next chunk to consolidate */
4373   mchunkptr       unsorted_bin;       /* bin header */
4374   mchunkptr       first_unsorted;     /* chunk to link to */
4375 
4376   /* These have same use as in free() */
4377   mchunkptr       nextchunk;
4378   INTERNAL_SIZE_T size;
4379   INTERNAL_SIZE_T nextsize;
4380   INTERNAL_SIZE_T prevsize;
4381   int             nextinuse;
4382   mchunkptr       bck;
4383   mchunkptr       fwd;
4384 
4385   /*
4386     If max_fast is 0, we know that av hasn't
4387     yet been initialized, in which case do so below
4388   */
4389 
4390   if (av->max_fast != 0) {
4391     clear_fastchunks(av);
4392 
4393     unsorted_bin = unsorted_chunks(av);
4394 
4395     /*
4396       Remove each chunk from fast bin and consolidate it, placing it
4397       then in unsorted bin. Among other reasons for doing this,
4398       placing in unsorted bin avoids needing to calculate actual bins
4399       until malloc is sure that chunks aren't immediately going to be
4400       reused anyway.
4401     */
4402 
4403     maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4404     fb = &(av->fastbins[0]);
4405     do {
4406       if ( (p = *fb) != 0) {
4407         *fb = 0;
4408 
4409         do {
4410           check_inuse_chunk(av, p);
4411           nextp = p->fd;
4412 
4413           /* Slightly streamlined version of consolidation code in free() */
4414           size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4415           nextchunk = chunk_at_offset(p, size);
4416           nextsize = chunksize(nextchunk);
4417 
4418           if (!prev_inuse(p)) {
4419             prevsize = p->prev_size;
4420             size += prevsize;
4421             p = chunk_at_offset(p, -((long) prevsize));
4422             unlink(p, bck, fwd);
4423           }
4424 
4425           if (nextchunk != av->top) {
4426             nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4427 
4428             if (!nextinuse) {
4429               size += nextsize;
4430               unlink(nextchunk, bck, fwd);
4431             } else
4432 	      clear_inuse_bit_at_offset(nextchunk, 0);
4433 
4434             first_unsorted = unsorted_bin->fd;
4435             unsorted_bin->fd = p;
4436             first_unsorted->bk = p;
4437 
4438             set_head(p, size | PREV_INUSE);
4439             p->bk = unsorted_bin;
4440             p->fd = first_unsorted;
4441             set_foot(p, size);
4442           }
4443 
4444           else {
4445             size += nextsize;
4446             set_head(p, size | PREV_INUSE);
4447             av->top = p;
4448           }
4449 
4450         } while ( (p = nextp) != 0);
4451 
4452       }
4453     } while (fb++ != maxfb);
4454   }
4455   else {
4456     malloc_init_state(av);
4457     check_malloc_state(av);
4458   }
4459 }
4460 
4461 /*
4462   ------------------------------ realloc ------------------------------
4463 */
4464 
4465 Void_t*
4466 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4467 {
4468   INTERNAL_SIZE_T  nb;              /* padded request size */
4469 
4470   mchunkptr        oldp;            /* chunk corresponding to oldmem */
4471   INTERNAL_SIZE_T  oldsize;         /* its size */
4472 
4473   mchunkptr        newp;            /* chunk to return */
4474   INTERNAL_SIZE_T  newsize;         /* its size */
4475   Void_t*          newmem;          /* corresponding user mem */
4476 
4477   mchunkptr        next;            /* next contiguous chunk after oldp */
4478 
4479   mchunkptr        remainder;       /* extra space at end of newp */
4480   unsigned long    remainder_size;  /* its size */
4481 
4482   mchunkptr        bck;             /* misc temp for linking */
4483   mchunkptr        fwd;             /* misc temp for linking */
4484 
4485   unsigned long    copysize;        /* bytes to copy */
4486   unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
4487   INTERNAL_SIZE_T* s;               /* copy source */
4488   INTERNAL_SIZE_T* d;               /* copy destination */
4489 
4490 
4491 #if REALLOC_ZERO_BYTES_FREES
4492   if (bytes == 0) {
4493     _int_free(av, oldmem);
4494     return 0;
4495   }
4496 #endif
4497 
4498   /* realloc of null is supposed to be same as malloc */
4499   if (oldmem == 0) return _int_malloc(av, bytes);
4500 
4501   checked_request2size(bytes, nb);
4502 
4503   oldp    = mem2chunk(oldmem);
4504   oldsize = chunksize(oldp);
4505 
4506   check_inuse_chunk(av, oldp);
4507 
4508   if (!chunk_is_mmapped(oldp)) {
4509 
4510     if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4511       /* already big enough; split below */
4512       newp = oldp;
4513       newsize = oldsize;
4514     }
4515 
4516     else {
4517       next = chunk_at_offset(oldp, oldsize);
4518 
4519       /* Try to expand forward into top */
4520       if (next == av->top &&
4521           (unsigned long)(newsize = oldsize + chunksize(next)) >=
4522           (unsigned long)(nb + MINSIZE)) {
4523         set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4524         av->top = chunk_at_offset(oldp, nb);
4525         set_head(av->top, (newsize - nb) | PREV_INUSE);
4526 	check_inuse_chunk(av, oldp);
4527         return chunk2mem(oldp);
4528       }
4529 
4530       /* Try to expand forward into next chunk;  split off remainder below */
4531       else if (next != av->top &&
4532                !inuse(next) &&
4533                (unsigned long)(newsize = oldsize + chunksize(next)) >=
4534                (unsigned long)(nb)) {
4535         newp = oldp;
4536         unlink(next, bck, fwd);
4537       }
4538 
4539       /* allocate, copy, free */
4540       else {
4541         newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4542         if (newmem == 0)
4543           return 0; /* propagate failure */
4544 
4545         newp = mem2chunk(newmem);
4546         newsize = chunksize(newp);
4547 
4548         /*
4549           Avoid copy if newp is next chunk after oldp.
4550         */
4551         if (newp == next) {
4552           newsize += oldsize;
4553           newp = oldp;
4554         }
4555         else {
4556           /*
4557             Unroll copy of <= 36 bytes (72 if 8byte sizes)
4558             We know that contents have an odd number of
4559             INTERNAL_SIZE_T-sized words; minimally 3.
4560           */
4561 
4562           copysize = oldsize - SIZE_SZ;
4563           s = (INTERNAL_SIZE_T*)(oldmem);
4564           d = (INTERNAL_SIZE_T*)(newmem);
4565           ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4566           assert(ncopies >= 3);
4567 
4568           if (ncopies > 9)
4569             MALLOC_COPY(d, s, copysize);
4570 
4571           else {
4572             *(d+0) = *(s+0);
4573             *(d+1) = *(s+1);
4574             *(d+2) = *(s+2);
4575             if (ncopies > 4) {
4576               *(d+3) = *(s+3);
4577               *(d+4) = *(s+4);
4578               if (ncopies > 6) {
4579                 *(d+5) = *(s+5);
4580                 *(d+6) = *(s+6);
4581                 if (ncopies > 8) {
4582                   *(d+7) = *(s+7);
4583                   *(d+8) = *(s+8);
4584                 }
4585               }
4586             }
4587           }
4588 
4589           _int_free(av, oldmem);
4590           check_inuse_chunk(av, newp);
4591           return chunk2mem(newp);
4592         }
4593       }
4594     }
4595 
4596     /* If possible, free extra space in old or extended chunk */
4597 
4598     assert((unsigned long)(newsize) >= (unsigned long)(nb));
4599 
4600     remainder_size = newsize - nb;
4601 
4602     if (remainder_size < MINSIZE) { /* not enough extra to split off */
4603       set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4604       set_inuse_bit_at_offset(newp, newsize);
4605     }
4606     else { /* split remainder */
4607       remainder = chunk_at_offset(newp, nb);
4608       set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4609       set_head(remainder, remainder_size | PREV_INUSE |
4610 	       (av != &main_arena ? NON_MAIN_ARENA : 0));
4611       /* Mark remainder as inuse so free() won't complain */
4612       set_inuse_bit_at_offset(remainder, remainder_size);
4613       _int_free(av, chunk2mem(remainder));
4614     }
4615 
4616     check_inuse_chunk(av, newp);
4617     return chunk2mem(newp);
4618   }
4619 
4620   /*
4621     Handle mmap cases
4622   */
4623 
4624   else {
4625 #if HAVE_MMAP
4626 
4627 #if HAVE_MREMAP
4628     INTERNAL_SIZE_T offset = oldp->prev_size;
4629     size_t pagemask = mp_.pagesize - 1;
4630     char *cp;
4631     unsigned long sum;
4632 
4633     /* Note the extra SIZE_SZ overhead */
4634     newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4635 
4636     /* don't need to remap if still within same page */
4637     if (oldsize == newsize - offset)
4638       return oldmem;
4639 
4640     cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
4641 
4642     if (cp != MAP_FAILED) {
4643 
4644       newp = (mchunkptr)(cp + offset);
4645       set_head(newp, (newsize - offset)|IS_MMAPPED);
4646 
4647       assert(aligned_OK(chunk2mem(newp)));
4648       assert((newp->prev_size == offset));
4649 
4650       /* update statistics */
4651       sum = mp_.mmapped_mem += newsize - oldsize;
4652       if (sum > (unsigned long)(mp_.max_mmapped_mem))
4653         mp_.max_mmapped_mem = sum;
4654 #ifdef NO_THREADS
4655       sum += main_arena.system_mem;
4656       if (sum > (unsigned long)(mp_.max_total_mem))
4657         mp_.max_total_mem = sum;
4658 #endif
4659 
4660       return chunk2mem(newp);
4661     }
4662 #endif
4663 
4664     /* Note the extra SIZE_SZ overhead. */
4665     if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
4666       newmem = oldmem; /* do nothing */
4667     else {
4668       /* Must alloc, copy, free. */
4669       newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4670       if (newmem != 0) {
4671         MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4672         _int_free(av, oldmem);
4673       }
4674     }
4675     return newmem;
4676 
4677 #else
4678     /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4679     check_malloc_state(av);
4680     MALLOC_FAILURE_ACTION;
4681     return 0;
4682 #endif
4683   }
4684 }
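/*
  A minimal usage sketch of the realloc semantics implemented above,
  assuming the standard public entry points (malloc/realloc/free) are
  wired to these internal routines.  Note that realloc(NULL, n) acts
  like malloc(n), and a shrinking request may simply split the old
  chunk in place rather than move it.

      void *p = malloc(100);
      if (p != 0) {
        void *q = realloc(p, 4000);     // may extend in place or copy
        if (q != 0)
          p = q;                        // on failure the old block survives
        free(p);
      }
*/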
4685 
4686 /*
4687   ------------------------------ memalign ------------------------------
4688 */
4689 
4690 Void_t*
4691 _int_memalign(mstate av, size_t alignment, size_t bytes)
4692 {
4693   INTERNAL_SIZE_T nb;             /* padded  request size */
4694   char*           m;              /* memory returned by malloc call */
4695   mchunkptr       p;              /* corresponding chunk */
4696   char*           brk;            /* alignment point within p */
4697   mchunkptr       newp;           /* chunk to return */
4698   INTERNAL_SIZE_T newsize;        /* its size */
4699   INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
4700   mchunkptr       remainder;      /* spare room at end to split off */
4701   unsigned long   remainder_size; /* its size */
4702   INTERNAL_SIZE_T size;
4703 
4704   /* If need less alignment than we give anyway, just relay to malloc */
4705 
4706   if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4707 
4708   /* Otherwise, ensure that it is at least a minimum chunk size */
4709 
4710   if (alignment <  MINSIZE) alignment = MINSIZE;
4711 
4712   /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
4713   if ((alignment & (alignment - 1)) != 0) {
4714     size_t a = MALLOC_ALIGNMENT * 2;
4715     while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4716     alignment = a;
4717   }
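  /*
    For example (assuming 8-byte MALLOC_ALIGNMENT): a requested alignment
    of 24 is not a power of two, so a starts at 16 and doubles once,
    giving an effective alignment of 32.
  */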
4718 
4719   checked_request2size(bytes, nb);
4720 
4721   /*
4722     Strategy: find a spot within that chunk that meets the alignment
4723     request, and then possibly free the leading and trailing space.
4724   */
4725 
4726 
4727   /* Call malloc with worst case padding to hit alignment. */
4728 
4729   m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4730 
4731   if (m == 0) return 0; /* propagate failure */
4732 
4733   p = mem2chunk(m);
4734 
4735   if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4736 
4737     /*
4738       Find an aligned spot inside chunk.  Since we need to give back
4739       leading space in a chunk of at least MINSIZE, if the first
4740       calculation places us at a spot with less than MINSIZE leader,
4741       we can move to the next aligned spot -- we've allocated enough
4742       total room so that this is always possible.
4743     */
4744 
4745     brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4746                            -((signed long) alignment));
4747     if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4748       brk += alignment;
4749 
4750     newp = (mchunkptr)brk;
4751     leadsize = brk - (char*)(p);
4752     newsize = chunksize(p) - leadsize;
4753 
4754     /* For mmapped chunks, just adjust offset */
4755     if (chunk_is_mmapped(p)) {
4756       newp->prev_size = p->prev_size + leadsize;
4757       set_head(newp, newsize|IS_MMAPPED);
4758       return chunk2mem(newp);
4759     }
4760 
4761     /* Otherwise, give back leader, use the rest */
4762     set_head(newp, newsize | PREV_INUSE |
4763 	     (av != &main_arena ? NON_MAIN_ARENA : 0));
4764     set_inuse_bit_at_offset(newp, newsize);
4765     set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4766     _int_free(av, chunk2mem(p));
4767     p = newp;
4768 
4769     assert (newsize >= nb &&
4770             (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4771   }
4772 
4773   /* Also give back spare room at the end */
4774   if (!chunk_is_mmapped(p)) {
4775     size = chunksize(p);
4776     if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4777       remainder_size = size - nb;
4778       remainder = chunk_at_offset(p, nb);
4779       set_head(remainder, remainder_size | PREV_INUSE |
4780 	       (av != &main_arena ? NON_MAIN_ARENA : 0));
4781       set_head_size(p, nb);
4782       _int_free(av, chunk2mem(remainder));
4783     }
4784   }
4785 
4786   check_inuse_chunk(av, p);
4787   return chunk2mem(p);
4788 }
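/*
  A minimal sketch of aligned allocation through the public memalign
  entry point (assuming it dispatches to _int_memalign as above).  The
  returned address is a multiple of the requested alignment and can be
  released with an ordinary free().

      void *buf = memalign(64, 1000);   // 64-byte aligned, >= 1000 bytes
      if (buf != 0) {
        // ((unsigned long) buf % 64) == 0 holds here
        free(buf);
      }
*/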
4789 
4790 #if 0
4791 /*
4792   ------------------------------ calloc ------------------------------
4793 */
4794 
4795 #if __STD_C
4796 Void_t* cALLOc(size_t n_elements, size_t elem_size)
4797 #else
4798 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4799 #endif
4800 {
4801   mchunkptr p;
4802   unsigned long clearsize;
4803   unsigned long nclears;
4804   INTERNAL_SIZE_T* d;
4805 
4806   Void_t* mem = mALLOc(n_elements * elem_size);
4807 
4808   if (mem != 0) {
4809     p = mem2chunk(mem);
4810 
4811 #if MMAP_CLEARS
4812     if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
4813 #endif
4814     {
4815       /*
4816         Unroll clear of <= 36 bytes (72 if 8byte sizes)
4817         We know that contents have an odd number of
4818         INTERNAL_SIZE_T-sized words; minimally 3.
4819       */
4820 
4821       d = (INTERNAL_SIZE_T*)mem;
4822       clearsize = chunksize(p) - SIZE_SZ;
4823       nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4824       assert(nclears >= 3);
4825 
4826       if (nclears > 9)
4827         MALLOC_ZERO(d, clearsize);
4828 
4829       else {
4830         *(d+0) = 0;
4831         *(d+1) = 0;
4832         *(d+2) = 0;
4833         if (nclears > 4) {
4834           *(d+3) = 0;
4835           *(d+4) = 0;
4836           if (nclears > 6) {
4837             *(d+5) = 0;
4838             *(d+6) = 0;
4839             if (nclears > 8) {
4840               *(d+7) = 0;
4841               *(d+8) = 0;
4842             }
4843           }
4844         }
4845       }
4846     }
4847   }
4848   return mem;
4849 }
4850 #endif /* 0 */
4851 
4852 /*
4853   ------------------------- independent_calloc -------------------------
4854 */
4855 
4856 Void_t**
4857 #if __STD_C
4858 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
4859 #else
4860 _int_icalloc(av, n_elements, elem_size, chunks)
4861 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
4862 #endif
4863 {
4864   size_t sz = elem_size; /* serves as 1-element array */
4865   /* opts arg of 3 means all elements are same size, and should be cleared */
4866   return iALLOc(av, n_elements, &sz, 3, chunks);
4867 }
4868 
4869 /*
4870   ------------------------- independent_comalloc -------------------------
4871 */
4872 
4873 Void_t**
4874 #if __STD_C
4875 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
4876 #else
4877 _int_icomalloc(av, n_elements, sizes, chunks)
4878 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
4879 #endif
4880 {
4881   return iALLOc(av, n_elements, sizes, 0, chunks);
4882 }
4883 
4884 
4885 /*
4886   ------------------------------ ialloc ------------------------------
4887   ialloc provides common support for independent_X routines, handling all of
4888   the combinations that can result.
4889 
4890   The opts arg has:
4891     bit 0 set if all elements are same size (using sizes[0])
4892     bit 1 set if elements should be zeroed
4893 */
4894 
4895 
4896 static Void_t**
4897 #if __STD_C
4898 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
4899 #else
4900 iALLOc(av, n_elements, sizes, opts, chunks)
4901 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
4902 #endif
4903 {
4904   INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
4905   INTERNAL_SIZE_T contents_size;  /* total size of elements */
4906   INTERNAL_SIZE_T array_size;     /* request size of pointer array */
4907   Void_t*         mem;            /* malloced aggregate space */
4908   mchunkptr       p;              /* corresponding chunk */
4909   INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
4910   Void_t**        marray;         /* either "chunks" or malloced ptr array */
4911   mchunkptr       array_chunk;    /* chunk for malloced ptr array */
4912   int             mmx;            /* to disable mmap */
4913   INTERNAL_SIZE_T size;
4914   INTERNAL_SIZE_T size_flags;
4915   size_t          i;
4916 
4917   /* Ensure initialization/consolidation */
4918   if (have_fastchunks(av)) malloc_consolidate(av);
4919 
4920   /* compute array length, if needed */
4921   if (chunks != 0) {
4922     if (n_elements == 0)
4923       return chunks; /* nothing to do */
4924     marray = chunks;
4925     array_size = 0;
4926   }
4927   else {
4928     /* if empty req, must still return chunk representing empty array */
4929     if (n_elements == 0)
4930       return (Void_t**) _int_malloc(av, 0);
4931     marray = 0;
4932     array_size = request2size(n_elements * (sizeof(Void_t*)));
4933   }
4934 
4935   /* compute total element size */
4936   if (opts & 0x1) { /* all-same-size */
4937     element_size = request2size(*sizes);
4938     contents_size = n_elements * element_size;
4939   }
4940   else { /* add up all the sizes */
4941     element_size = 0;
4942     contents_size = 0;
4943     for (i = 0; i != n_elements; ++i)
4944       contents_size += request2size(sizes[i]);
4945   }
4946 
4947   /* subtract out alignment bytes from total to minimize overallocation */
4948   size = contents_size + array_size - MALLOC_ALIGN_MASK;
4949 
4950   /*
4951      Allocate the aggregate chunk.
4952      But first disable mmap so malloc won't use it, since
4953      we would not be able to later free/realloc space internal
4954      to a segregated mmap region.
4955   */
4956   mmx = mp_.n_mmaps_max;   /* disable mmap */
4957   mp_.n_mmaps_max = 0;
4958   mem = _int_malloc(av, size);
4959   mp_.n_mmaps_max = mmx;   /* reset mmap */
4960   if (mem == 0)
4961     return 0;
4962 
4963   p = mem2chunk(mem);
4964   assert(!chunk_is_mmapped(p));
4965   remainder_size = chunksize(p);
4966 
4967   if (opts & 0x2) {       /* optionally clear the elements */
4968     MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
4969   }
4970 
4971   size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
4972 
4973   /* If not provided, allocate the pointer array as final part of chunk */
4974   if (marray == 0) {
4975     array_chunk = chunk_at_offset(p, contents_size);
4976     marray = (Void_t**) (chunk2mem(array_chunk));
4977     set_head(array_chunk, (remainder_size - contents_size) | size_flags);
4978     remainder_size = contents_size;
4979   }
4980 
4981   /* split out elements */
4982   for (i = 0; ; ++i) {
4983     marray[i] = chunk2mem(p);
4984     if (i != n_elements-1) {
4985       if (element_size != 0)
4986         size = element_size;
4987       else
4988         size = request2size(sizes[i]);
4989       remainder_size -= size;
4990       set_head(p, size | size_flags);
4991       p = chunk_at_offset(p, size);
4992     }
4993     else { /* the final element absorbs any overallocation slop */
4994       set_head(p, remainder_size | size_flags);
4995       break;
4996     }
4997   }
4998 
4999 #if MALLOC_DEBUG
5000   if (marray != chunks) {
5001     /* final element must have exactly exhausted chunk */
5002     if (element_size != 0)
5003       assert(remainder_size == element_size);
5004     else
5005       assert(remainder_size == request2size(sizes[i]));
5006     check_inuse_chunk(av, mem2chunk(marray));
5007   }
5008 
5009   for (i = 0; i != n_elements; ++i)
5010     check_inuse_chunk(av, mem2chunk(marray[i]));
5011 #endif
5012 
5013   return marray;
5014 }
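/*
  A sketch of how the independent_X wrappers built on iALLOc might be
  used, assuming an exported independent_comalloc wrapper that forwards
  to _int_icomalloc.  All element sizes are carved out of one aggregate
  chunk, so the pieces end up adjacent but remain individually freeable.

      size_t sizes[3] = { 16, 100, 240 };
      void  *elems[3];
      if (independent_comalloc(3, sizes, elems) != 0) {
        // elems[0..2] are distinct chunks; free in any order
        free(elems[1]);
        free(elems[0]);
        free(elems[2]);
      }
*/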
5015 
5016 
5017 /*
5018   ------------------------------ valloc ------------------------------
5019 */
5020 
5021 Void_t*
5022 #if __STD_C
5023 _int_valloc(mstate av, size_t bytes)
5024 #else
5025 _int_valloc(av, bytes) mstate av; size_t bytes;
5026 #endif
5027 {
5028   /* Ensure initialization/consolidation */
5029   if (have_fastchunks(av)) malloc_consolidate(av);
5030   return _int_memalign(av, mp_.pagesize, bytes);
5031 }
5032 
5033 /*
5034   ------------------------------ pvalloc ------------------------------
5035 */
5036 
5037 
5038 Void_t*
5039 #if __STD_C
5040 _int_pvalloc(mstate av, size_t bytes)
5041 #else
5042 _int_pvalloc(av, bytes) mstate av; size_t bytes;
5043 #endif
5044 {
5045   size_t pagesz;
5046 
5047   /* Ensure initialization/consolidation */
5048   if (have_fastchunks(av)) malloc_consolidate(av);
5049   pagesz = mp_.pagesize;
5050   return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5051 }
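/*
  Example of the rounding performed above (assuming a 4096-byte page):
  a pvalloc request for 100 bytes becomes (100 + 4095) & ~4095 == 4096,
  i.e. one full page, returned page-aligned.
*/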
5052 
5053 
5054 /*
5055   ------------------------------ malloc_trim ------------------------------
5056 */
5057 
5058 #if __STD_C
5059 int mTRIm(size_t pad)
5060 #else
5061 int mTRIm(pad) size_t pad;
5062 #endif
5063 {
5064   mstate av = &main_arena; /* already locked */
5065 
5066   /* Ensure initialization/consolidation */
5067   malloc_consolidate(av);
5068 
5069 #ifndef MORECORE_CANNOT_TRIM
5070   return sYSTRIm(pad, av);
5071 #else
5072   return 0;
5073 #endif
5074 }
5075 
5076 
5077 /*
5078   ------------------------- malloc_usable_size -------------------------
5079 */
5080 
5081 #if __STD_C
5082 size_t mUSABLe(Void_t* mem)
5083 #else
5084 size_t mUSABLe(mem) Void_t* mem;
5085 #endif
5086 {
5087   mchunkptr p;
5088   if (mem != 0) {
5089     p = mem2chunk(mem);
5090     if (chunk_is_mmapped(p))
5091       return chunksize(p) - 2*SIZE_SZ;
5092     else if (inuse(p))
5093       return chunksize(p) - SIZE_SZ;
5094   }
5095   return 0;
5096 }
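/*
  Worked example (assuming a typical 64-bit build with SIZE_SZ == 8 and
  16-byte alignment): malloc(100) is padded to a 112-byte chunk, so
  malloc_usable_size() on that pointer reports 112 - SIZE_SZ == 104.
*/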
5097 
5098 /*
5099   ---------------------- internal mallinfo -----------------------------
5100 */
5101 
5102 void _int_get_arena_info(mstate av, struct malloc_arena_info *mai)
5103 {
5104   size_t i;
5105   mbinptr b;
5106   mchunkptr p;
5107   size_t binavail = 0;
5108   size_t fastavail = 0;
5109   int nbinblocks = 0;
5110   int nfastblocks = 0;
5111 
5112   (void)mutex_lock(&av->mutex);
5113 
5114   /* Ensure initialization */
5115   if (av->top == 0)  malloc_consolidate(av);
5116 
5117   check_malloc_state(av);
5118 
5119   /* traverse fastbins */
5120   for (i = 0; i < NFASTBINS; ++i) {
5121     for (p = av->fastbins[i]; p != 0; p = p->fd) {
5122       ++nfastblocks;
5123       fastavail += chunksize(p);
5124     }
5125   }
5126 
5127   /* traverse regular bins */
5128   for (i = 1; i < NBINS; ++i) {
5129     b = bin_at(av, i);
5130     for (p = last(b); p != b; p = p->bk) {
5131       ++nbinblocks;
5132       binavail += chunksize(p);
5133     }
5134   }
5135 
5136   mai->nfastblocks = nfastblocks;
5137   mai->nbinblocks = nbinblocks;
5138   mai->fastavail = fastavail;
5139   mai->binavail = binavail;
5140   mai->top_size = chunksize(av->top);
5141   mai->system_mem = av->system_mem;
5142   mai->max_system_mem = av->max_system_mem;
5143   mai->stat_lock_direct = av->stat_lock_direct;
5144   mai->stat_lock_loop   = av->stat_lock_loop;
5145   mai->stat_lock_wait   = av->stat_lock_wait;
5146 
5147   (void)mutex_unlock(&av->mutex);
5148 }
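/*
  Sketch of consuming the snapshot filled in above (a caller with access
  to an mstate, e.g. the statistics code in malloc-stats.c, is assumed;
  the routine takes the arena lock itself):

      struct malloc_arena_info mai;
      _int_get_arena_info(&main_arena, &mai);
      fprintf(stderr, "top: %lu bytes, free in bins: %lu bytes\n",
              (unsigned long) mai.top_size,
              (unsigned long) (mai.fastavail + mai.binavail));
*/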
5149 
5150 void
5151 _int_get_global_info (struct malloc_global_info *mgi)
5152 {
5153   mgi->n_mmaps = mp_.n_mmaps;
5154   mgi->max_n_mmaps = mp_.max_n_mmaps;
5155   mgi->mmapped_mem = mp_.mmapped_mem;
5156   mgi->max_mmapped_mem = mp_.max_mmapped_mem;
5157   mgi->max_total_mem = mp_.max_total_mem;
5158 #if THREAD_STATS
5159   mgi->stat_n_heaps = stat_n_heaps;
5160 #else
5161   mgi->stat_n_heaps = 0;
5162 #endif
5163 }
5164 
5165 /*
5166   ------------------------------ malloc_stats ------------------------------
5167 */
5168 
5169 /* Now in separate file, malloc-stats.c. */
5170 
5171 /*
5172   ------------------------------ mallopt ------------------------------
5173 */
5174 
5175 #if __STD_C
5176 int mALLOPt(int param_number, int value)
5177 #else
5178 int mALLOPt(param_number, value) int param_number; int value;
5179 #endif
5180 {
5181   mstate av = &main_arena;
5182   int res = 1;
5183 
5184   if(__malloc_initialized < 0)
5185     ptmalloc_init ();
5186   (void)mutex_lock(&av->mutex);
5187   /* Ensure initialization/consolidation */
5188   malloc_consolidate(av);
5189 
5190   switch(param_number) {
5191   case M_MXFAST:
5192     if (value >= 0 && value <= MAX_FAST_SIZE) {
5193       set_max_fast(av, value);
5194     }
5195     else
5196       res = 0;
5197     break;
5198 
5199   case M_TRIM_THRESHOLD:
5200     mp_.trim_threshold = value;
5201     break;
5202 
5203   case M_TOP_PAD:
5204     mp_.top_pad = value;
5205     break;
5206 
5207   case M_MMAP_THRESHOLD:
5208 #if USE_ARENAS
5209     /* Forbid setting the threshold too high. */
5210     if((unsigned long)value > HEAP_MAX_SIZE/2)
5211       res = 0;
5212     else
5213 #endif
5214       mp_.mmap_threshold = value;
5215     break;
5216 
5217   case M_MMAP_MAX:
5218 #if !HAVE_MMAP
5219     if (value != 0)
5220       res = 0;
5221     else
5222 #endif
5223       mp_.n_mmaps_max = value;
5224     break;
5225 
5226   case M_CHECK_ACTION:
5227     check_action = value;
5228     break;
5229   }
5230   (void)mutex_unlock(&av->mutex);
5231   return res;
5232 }
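/*
  A minimal sketch of tuning through the public mallopt interface
  (parameter names as handled in the switch above).  mallopt returns
  nonzero on success and 0 if the value was rejected.

      if (mallopt(M_MMAP_THRESHOLD, 256*1024) == 0)
        fprintf(stderr, "mmap threshold not changed\n");
      mallopt(M_TRIM_THRESHOLD, 128*1024);   // trim when top exceeds 128K
*/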
5233 
5234 
5235 /*
5236   -------------------- Alternative MORECORE functions --------------------
5237 */
5238 
5239 
5240 /*
5241   General Requirements for MORECORE.
5242 
5243   The MORECORE function must have the following properties:
5244 
5245   If MORECORE_CONTIGUOUS is false:
5246 
5247     * MORECORE must allocate in multiples of pagesize. It will
5248       only be called with arguments that are multiples of pagesize.
5249 
5250     * MORECORE(0) must return an address that is at least
5251       MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5252 
5253   else (i.e. If MORECORE_CONTIGUOUS is true):
5254 
5255     * Consecutive calls to MORECORE with positive arguments
5256       return increasing addresses, indicating that space has been
5257       contiguously extended.
5258 
5259     * MORECORE need not allocate in multiples of pagesize.
5260       Calls to MORECORE need not have args of multiples of pagesize.
5261 
5262     * MORECORE need not page-align.
5263 
5264   In either case:
5265 
5266     * MORECORE may allocate more memory than requested. (Or even less,
5267       but this will generally result in a malloc failure.)
5268 
5269     * MORECORE must not allocate memory when given argument zero, but
5270       instead return one past the end address of memory from previous
5271       nonzero call. This malloc does NOT call MORECORE(0)
5272       until at least one call with positive arguments is made, so
5273       the initial value returned is not important.
5274 
5275     * Even though consecutive calls to MORECORE need not return contiguous
5276       addresses, it must be OK for malloc'ed chunks to span multiple
5277       regions in those cases where they do happen to be contiguous.
5278 
5279     * MORECORE need not handle negative arguments -- it may instead
5280       just return MORECORE_FAILURE when given negative arguments.
5281       Negative arguments are always multiples of pagesize. MORECORE
5282       must not misinterpret negative args as large positive unsigned
5283       args. You can suppress all such calls from even occurring by defining
5284       MORECORE_CANNOT_TRIM.
5285 
5286   There is some variation across systems about the type of the
5287   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5288   actually be size_t, because sbrk supports negative args, so it is
5289   normally the signed type of the same width as size_t (sometimes
5290   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
5291   matter though. Internally, we use "long" as arguments, which should
5292   work across all reasonable possibilities.
5293 
5294   Additionally, if MORECORE ever returns failure for a positive
5295   request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5296   system allocator. This is a useful backup strategy for systems with
5297   holes in address spaces -- in this case sbrk cannot contiguously
5298   expand the heap, but mmap may be able to map noncontiguous space.
5299 
5300   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5301   a function that always returns MORECORE_FAILURE.
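  For instance (a sketch, not part of this file; the exact argument type
  may vary by platform), forcing mmap-only operation could look like:

      static void *fail_morecore(ptrdiff_t increment)
      {
        return (void *) MORECORE_FAILURE;
      }
      #define MORECORE fail_morecore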
5302 
5303   If you are using this malloc with something other than sbrk (or its
5304   emulation) to supply memory regions, you probably want to set
5305   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
5306   allocator kindly contributed for pre-OSX macOS.  It uses virtually
5307   but not necessarily physically contiguous non-paged memory (locked
5308   in, present and won't get swapped out).  You can use it by
5309   uncommenting this section, adding some #includes, and setting up the
5310   appropriate defines above:
5311 
5312       #define MORECORE osMoreCore
5313       #define MORECORE_CONTIGUOUS 0
5314 
5315   There is also a shutdown routine that should somehow be called for
5316   cleanup upon program exit.
5317 
5318   #define MAX_POOL_ENTRIES 100
5319   #define MINIMUM_MORECORE_SIZE  (64 * 1024)
5320   static int next_os_pool;
5321   void *our_os_pools[MAX_POOL_ENTRIES];
5322 
5323   void *osMoreCore(int size)
5324   {
5325     void *ptr = 0;
5326     static void *sbrk_top = 0;
5327 
5328     if (size > 0)
5329     {
5330       if (size < MINIMUM_MORECORE_SIZE)
5331          size = MINIMUM_MORECORE_SIZE;
5332       if (CurrentExecutionLevel() == kTaskLevel)
5333          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5334       if (ptr == 0)
5335       {
5336         return (void *) MORECORE_FAILURE;
5337       }
5338       // save ptrs so they can be freed during cleanup
5339       our_os_pools[next_os_pool] = ptr;
5340       next_os_pool++;
5341       ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5342       sbrk_top = (char *) ptr + size;
5343       return ptr;
5344     }
5345     else if (size < 0)
5346     {
5347       // we don't currently support shrink behavior
5348       return (void *) MORECORE_FAILURE;
5349     }
5350     else
5351     {
5352       return sbrk_top;
5353     }
5354   }
5355 
5356   // cleanup any allocated memory pools
5357   // called as last thing before shutting down driver
5358 
5359   void osCleanupMem(void)
5360   {
5361     void **ptr;
5362 
5363     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5364       if (*ptr)
5365       {
5366          PoolDeallocate(*ptr);
5367          *ptr = 0;
5368       }
5369   }
5370 
5371 */
5372 
5373 
5374 #ifdef _LIBC
5375 # include <sys/param.h>
5376 
5377 /* We need a wrapper function for one of the additions of POSIX.  */
5378 int
5379 __posix_memalign (void **memptr, size_t alignment, size_t size)
5380 {
5381   void *mem;
5382   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5383 					__const __malloc_ptr_t)) =
5384     __memalign_hook;
5385 
5386   /* Test whether the ALIGNMENT argument is valid.  It must be a power
5387      of two multiple of sizeof (void *).  */
5388   if (alignment % sizeof (void *) != 0
5389       || !powerof2 (alignment / sizeof (void *))
5390       || alignment == 0)
5391     return EINVAL;
5392 
5393   /* Call the hook here, so that caller is posix_memalign's caller
5394      and not posix_memalign itself.  */
5395   if (hook != NULL)
5396     mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5397   else
5398     mem = public_mEMALIGn (alignment, size);
5399 
5400   if (mem != NULL) {
5401     *memptr = mem;
5402     return 0;
5403   }
5404 
5405   return ENOMEM;
5406 }
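/*
  A usage sketch of the POSIX wrapper above (assuming 8-byte pointers):
  the alignment must be a power-of-two multiple of sizeof(void *),
  otherwise EINVAL is returned and *memptr is left untouched.

      void *p;
      int rc = posix_memalign(&p, 64, 1000);   // valid alignment
      if (rc == 0)
        free(p);
      rc = posix_memalign(&p, 48, 1000);       // 48/8 == 6, not a power of two -> EINVAL
*/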
5407 weak_alias (__posix_memalign, posix_memalign)
5408 
5409 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5410 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5411 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5412 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5413 strong_alias (__libc_memalign, __memalign)
5414 weak_alias (__libc_memalign, memalign)
5415 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5416 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5417 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5418 strong_alias (__libc_mallinfo, __mallinfo)
5419 weak_alias (__libc_mallinfo, mallinfo)
5420 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5421 
5422 weak_alias (__malloc_usable_size, malloc_usable_size)
5423 weak_alias (__malloc_trim, malloc_trim)
5424 weak_alias (__malloc_get_state, malloc_get_state)
5425 weak_alias (__malloc_set_state, malloc_set_state)
5426 
5427 #endif /* _LIBC */
5428 
5429 /* ------------------------------------------------------------
5430 History:
5431 
5432 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5433 
5434 */
5435 /*
5436  * Local variables:
5437  * c-basic-offset: 2
5438  * End:
5439  */
5440