1 /*
2   [insert usage description etc here]
3 
4   preliminary version 2.8.x
5 */
6 
7 /*
8   WIN32 sets up defaults for MS environment and compilers.
9   Otherwise defaults are for unix.
10 */
11 
12 /* #define WIN32 */
13 
14 #ifdef WIN32
15 
16 #define WIN32_LEAN_AND_MEAN
17 #include <windows.h>
18 
19 /* No-op failure action */
20 #define MALLOC_FAILURE_ACTION
21 
22 /* Win32 doesn't supply or need the following headers */
23 #define LACKS_UNISTD_H
24 #define LACKS_SYS_PARAM_H
25 #define LACKS_SYS_MMAN_H
26 
27 /* Use the supplied emulation of sbrk */
28 #define MORECORE sbrk
29 #define MORECORE_CONTIGUOUS 1
30 #define MORECORE_FAILURE    ((void*)(-1))
31 
32 /* Use the supplied emulation of mmap and munmap */
33 #define HAVE_MMAP 1
34 #define MUNMAP_FAILURE  (-1)
35 #define MMAP_CLEARS 1
36 
37 /* These values don't really matter in windows mmap emulation */
38 #define MAP_PRIVATE 1
39 #define MAP_ANONYMOUS 2
40 #define PROT_READ 1
41 #define PROT_WRITE 2
42 
43 /* Emulation functions defined at the end of this file */
44 
45 /* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
46 #ifdef USE_MALLOC_LOCK
47 static int slwait(int *sl);
48 static int slrelease(int *sl);
49 #endif
50 
51 static long getpagesize(void);
52 static long getregionsize(void);
53 static void *sbrk(long size);
54 static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
55 static long munmap(void *ptr, long size);
56 
57 static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
58 static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
59 
60 #endif
61 
62 
63 #include <strings.h>
64 
65 /*
66   Void_t* is the pointer type that malloc should say it returns
67 */
68 
69 #ifndef Void_t
70 #define Void_t      void
71 #endif /*Void_t*/
72 
73 #include <stddef.h>   /* for size_t */
74 
75 #ifdef __cplusplus
76 extern "C" {
77 #endif
78 
79 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
80 
81 /* #define  LACKS_UNISTD_H */
82 
83 #ifndef LACKS_UNISTD_H
84 #include <unistd.h>
85 #endif
86 
87 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
88 
89 /* #define  LACKS_SYS_PARAM_H */
90 
91 
92 #include <stdio.h>    /* needed for malloc_stats */
93 #include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
94 
95 
96 /*
97   Debugging:
98 
99   Because freed chunks may be overwritten with bookkeeping fields, this
100   malloc will often die when freed memory is overwritten by user
101   programs.  This can be very effective (albeit in an annoying way)
102   in helping track down dangling pointers.
103 
104   If you compile with -DDEBUG, a number of assertion checks are
105   enabled that will catch more memory errors. You probably won't be
106   able to make much sense of the actual assertion errors, but they
107   should help you locate incorrectly overwritten memory.  The
108   checking is fairly extensive, and will slow down execution
109   noticeably. Calling malloc_stats or mallinfo with DEBUG set will
110   attempt to check every non-mmapped allocated and free chunk in the
111   course of computing the summaries. (By nature, mmapped regions
112   cannot be checked very much automatically.)
113 
114   Setting DEBUG may also be helpful if you are trying to modify
115   this code. The assertions in the check routines spell out in more
116   detail the assumptions and invariants underlying the algorithms.
117 
118   Setting DEBUG does NOT provide an automated mechanism for checking
119   that all accesses to malloced memory stay within their
120   bounds. However, there are several add-ons and adaptations of this
121   or other mallocs available that do this.
122 */
123 
124 #if DEBUG
125 #include <assert.h>
126 /* #define assert(x) if(!(x)) abort() */
127 
128 #else
129 #define assert(x) ((void)0)
130 #endif
131 
132 /*
133   The unsigned integer type used for comparing any two chunk sizes.
134   This should be at least as wide as size_t, but should not be signed.
135 */
136 
137 #ifndef CHUNK_SIZE_T
138 #define CHUNK_SIZE_T unsigned long
139 #endif
140 
141 #define MAX_CHUNK_SIZE  ((CHUNK_SIZE_T)(-1UL))
142 
143 /*
144   The unsigned integer type used to hold addresses when they are
145   manipulated as integers. Except that it is not defined on all
146   systems, intptr_t would suffice.
147 */
148 #ifndef PTR_UINT
149 #define PTR_UINT unsigned long
150 #endif
151 
152 
153 /*
154   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
155   of chunk sizes.
156 
157   The default version is the same as size_t.
158 
159   While not strictly necessary, it is best to define this as an
160   unsigned type, even if size_t is a signed type. This may avoid some
161   artificial size limitations on some systems.
162 
163   On a 64-bit machine, you may be able to reduce malloc overhead by
164   defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
165   expense of not being able to handle more than 2^32 of malloced
166   space. If this limitation is acceptable, you are encouraged to set
167   this unless you are on a platform requiring 16-byte alignments. In
168   this case the alignment requirements turn out to negate any
169   potential advantages of decreasing size_t word size.
170 
171   Implementors: Beware of the possible combinations of:
172      - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
173        and might be the same width as int or as long
174      - size_t might have different width and signedness as INTERNAL_SIZE_T
175      - int and long might be 32 or 64 bits, and might be the same width
176   To deal with this, most comparisons and difference computations
177   among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
178   aware of the fact that casting an unsigned int to a wider long does
179   not sign-extend. (This also makes checking for negative numbers
180   awkward.) Some of these casts result in harmless compiler warnings
181   on some systems.
182 */
183 
184 #ifndef INTERNAL_SIZE_T
185 #define INTERNAL_SIZE_T size_t
186 #endif
187 
188 /* The corresponding word size */
189 #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
190 
191 
192 
193 /*
194   MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
195   It must be a power of two at least 2 * SIZE_SZ, even on machines
196   for which smaller alignments would suffice. It may be defined as
197   larger than this though. Note however that code and data structures
198   are optimized for the case of 8-byte alignment.
199 */
200 
201 
202 #ifndef MALLOC_ALIGNMENT
203 #define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
204 #endif
205 
206 /* The corresponding bit mask value */
207 #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
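
/*
  Illustrative sketch (not part of the implementation): a mask like
  MALLOC_ALIGN_MASK is normally used to round a byte count up to the
  next multiple of MALLOC_ALIGNMENT. This only shows the rounding idea,
  not the allocator's own internal request-to-chunk-size conversion.

    size_t round_up_to_alignment(size_t n) {
      return (n + MALLOC_ALIGN_MASK) & ~((size_t) MALLOC_ALIGN_MASK);
    }
*/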
208 
209 
210 
211 /*
212   REALLOC_ZERO_BYTES_FREES should be set if a call to
213   realloc with zero bytes should be the same as a call to free.
214   Some people think it should. Otherwise, since this malloc
215   returns a unique pointer for malloc(0), realloc(p, 0) does too.
216 */
217 
218 /*   #define REALLOC_ZERO_BYTES_FREES */
219 
220 /*
221   TRIM_FASTBINS controls whether free() of a very small chunk can
222   immediately lead to trimming. Setting to true (1) can reduce memory
223   footprint, but will almost always slow down programs that use a lot
224   of small chunks.
225 
226   Define this only if you are willing to give up some speed to more
227   aggressively reduce system-level memory footprint when releasing
228   memory in programs that use many small chunks.  You can get
229   essentially the same effect by setting MXFAST to 0, but this can
230   lead to even greater slowdowns in programs using many small chunks.
231   TRIM_FASTBINS is an in-between compile-time option that disables
232   only those chunks bordering topmost memory from being placed in
233   fastbins.
234 */
235 
236 #ifndef TRIM_FASTBINS
237 #define TRIM_FASTBINS  0
238 #endif
239 
240 
241 /*
242   USE_DL_PREFIX will prefix all public routines with the string 'dl'.
243   This is necessary when you only want to use this malloc in one part
244   of a program, using your regular system malloc elsewhere.
245 */
246 
247 /* #define USE_DL_PREFIX */
248 
249 
250 /*
251   USE_MALLOC_LOCK causes wrapper functions to surround each
252   callable routine with pthread mutex lock/unlock.
253 
254   USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
255 */
256 
257 /* #define USE_MALLOC_LOCK */
258 
259 
260 /*
261   If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
262   actually a wrapper function that first calls MALLOC_PREACTION, then
263   calls the internal routine, and follows it with
264   MALLOC_POSTACTION. This is needed for locking, but you can also use
265   this, without USE_MALLOC_LOCK, for purposes of interception,
266   instrumentation, etc. It is a sad fact that using wrappers often
267   noticeably degrades performance of malloc-intensive programs.
268 */
269 
270 #ifdef USE_MALLOC_LOCK
271 #define USE_PUBLIC_MALLOC_WRAPPERS
272 #else
273 /* #define USE_PUBLIC_MALLOC_WRAPPERS */
274 #endif
275 
276 
277 /*
278    Two-phase name translation.
279    All of the actual routines are given mangled names.
280    When wrappers are used, they become the public callable versions.
281    When USE_DL_PREFIX is used, the callable names are prefixed.
282 */
283 
284 #ifndef USE_PUBLIC_MALLOC_WRAPPERS
285 #define cALLOc      public_cALLOc
286 #define fREe        public_fREe
287 #define cFREe       public_cFREe
288 #define mALLOc      public_mALLOc
289 #define mEMALIGn    public_mEMALIGn
290 #define rEALLOc     public_rEALLOc
291 #define vALLOc      public_vALLOc
292 #define pVALLOc     public_pVALLOc
293 #define mALLINFo    public_mALLINFo
294 #define mALLOPt     public_mALLOPt
295 #define mTRIm       public_mTRIm
296 #define mSTATs      public_mSTATs
297 #define mUSABLe     public_mUSABLe
298 #define iCALLOc     public_iCALLOc
299 #define iCOMALLOc   public_iCOMALLOc
300 #endif
301 
302 #ifdef USE_DL_PREFIX
303 #define public_cALLOc    dlcalloc
304 #define public_fREe      dlfree
305 #define public_cFREe     dlcfree
306 #define public_mALLOc    dlmalloc
307 #define public_mEMALIGn  dlmemalign
308 #define public_rEALLOc   dlrealloc
309 #define public_vALLOc    dlvalloc
310 #define public_pVALLOc   dlpvalloc
311 #define public_mALLINFo  dlmallinfo
312 #define public_mALLOPt   dlmallopt
313 #define public_mTRIm     dlmalloc_trim
314 #define public_mSTATs    dlmalloc_stats
315 #define public_mUSABLe   dlmalloc_usable_size
316 #define public_iCALLOc   dlindependent_calloc
317 #define public_iCOMALLOc dlindependent_comalloc
318 #else /* USE_DL_PREFIX */
319 #define public_cALLOc    calloc
320 #define public_fREe      free
321 #define public_cFREe     cfree
322 #define public_mALLOc    malloc
323 #define public_mEMALIGn  memalign
324 #define public_rEALLOc   realloc
325 #define public_vALLOc    valloc
326 #define public_pVALLOc   pvalloc
327 #define public_mALLINFo  mallinfo
328 #define public_mALLOPt   mallopt
329 #define public_mTRIm     malloc_trim
330 #define public_mSTATs    malloc_stats
331 #define public_mUSABLe   malloc_usable_size
332 #define public_iCALLOc   independent_calloc
333 #define public_iCOMALLOc independent_comalloc
334 #endif /* USE_DL_PREFIX */
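
/*
  Example (illustrative sketch): with USE_DL_PREFIX defined, the mappings
  above expose this allocator as dlmalloc, dlfree, etc., so one part of a
  program can use it while the rest keeps the system malloc.

    void* a = dlmalloc(100);   // served by this allocator
    void* b = malloc(100);     // served by the system's malloc
    dlfree(a);
    free(b);
*/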
335 
336 
337 /*
338   HAVE_MEMCPY should be defined if you are not otherwise using
339   ANSI STD C, but still have memcpy and memset in your C library
340   and want to use them in calloc and realloc. Otherwise simple
341   macro versions are defined below.
342 
343   USE_MEMCPY should be defined as 1 if you actually want to
344   have memset and memcpy called. People report that the macro
345   versions are faster than libc versions on some systems.
346 
347   Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
348   (of <= 36 bytes) are manually unrolled in realloc and calloc.
349 */
350 
351 #define HAVE_MEMCPY
352 
353 #ifndef USE_MEMCPY
354 #ifdef HAVE_MEMCPY
355 #define USE_MEMCPY 1
356 #else
357 #define USE_MEMCPY 0
358 #endif
359 #endif
360 
361 
362 #if (defined(HAVE_MEMCPY))
363 #ifdef WIN32
364 /* On Win32 memset and memcpy are already declared in windows.h */
365 #else
366 void* memset(void*, int, size_t);
367 void* memcpy(void*, const void*, size_t);
368 #endif
369 #endif
370 
371 /*
372   MALLOC_FAILURE_ACTION is the action to take before "return 0" when
373   malloc fails to be able to return memory, either because memory is
374   exhausted or because of illegal arguments.
375 
376   By default, sets errno if running on STD_C platform, else does nothing.
377 */
378 
379 #ifndef MALLOC_FAILURE_ACTION
380 #define MALLOC_FAILURE_ACTION \
381    errno = ENOMEM;
382 #endif
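
/*
  Example (sketch): MALLOC_FAILURE_ACTION may be predefined to hook
  allocation failures, for instance to log them in addition to setting
  errno. The fprintf message below is purely illustrative.

    #define MALLOC_FAILURE_ACTION \
      { fprintf(stderr, "malloc: returning NULL\n"); errno = ENOMEM; }
*/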
383 
384 /*
385   MORECORE-related declarations. By default, rely on sbrk
386 */
387 
388 
389 #ifdef LACKS_UNISTD_H
390 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
391 extern Void_t*     sbrk(ptrdiff_t);
392 #endif
393 #endif
394 
395 /*
396   MORECORE is the name of the routine to call to obtain more memory
397   from the system.  See below for general guidance on writing
398   alternative MORECORE functions, as well as a version for WIN32 and a
399   sample version for pre-OSX macos.
400 */
401 
402 #ifndef MORECORE
403 #define MORECORE sbrk
404 #endif
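
/*
  Sketch of an alternative MORECORE (illustrative only; static_morecore,
  static_arena and STATIC_ARENA_SIZE are hypothetical names).  It assumes
  a fixed static arena is acceptable and that trimming is disabled, and
  only illustrates the contract: return the start of `size` additional
  bytes, the current top for size == 0, or MORECORE_FAILURE otherwise.

    #define STATIC_ARENA_SIZE (1024 * 1024)         // hypothetical size
    static char   static_arena[STATIC_ARENA_SIZE];
    static size_t static_arena_used = 0;

    static void* static_morecore(ptrdiff_t size) {
      if (size == 0)                                // report current top
        return static_arena + static_arena_used;
      if (size < 0 ||
          static_arena_used + (size_t) size > STATIC_ARENA_SIZE)
        return (void*)(MORECORE_FAILURE);           // cannot trim or grow
      {
        void* result = static_arena + static_arena_used;
        static_arena_used += (size_t) size;
        return result;
      }
    }
    // ... and build with MORECORE defined to static_morecore,
    // plus MORECORE_CANNOT_TRIM.
*/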
405 
406 /*
407   MORECORE_FAILURE is the value returned upon failure of MORECORE
408   as well as mmap. Since it cannot be an otherwise valid memory address,
409   and must reflect values of standard sys calls, you probably ought not
410   try to redefine it.
411 */
412 
413 #ifndef MORECORE_FAILURE
414 #define MORECORE_FAILURE (-1)
415 #endif
416 
417 /*
418   If MORECORE_CONTIGUOUS is true, take advantage of fact that
419   consecutive calls to MORECORE with positive arguments always return
420   contiguous increasing addresses.  This is true of unix sbrk.  Even
421   if not defined, when regions happen to be contiguous, malloc will
422   permit allocations spanning regions obtained from different
423   calls. But defining this when applicable enables some stronger
424   consistency checks and space efficiencies.
425 */
426 
427 #ifndef MORECORE_CONTIGUOUS
428 #define MORECORE_CONTIGUOUS 1
429 #endif
430 
431 /*
432   Define MORECORE_CANNOT_TRIM if your version of MORECORE
433   cannot release space back to the system when given negative
434   arguments. This is generally necessary only if you are using
435   a hand-crafted MORECORE function that cannot handle negative arguments.
436 */
437 
438 /* #define MORECORE_CANNOT_TRIM */
439 
440 
441 /*
442   Define HAVE_MMAP as true to optionally make malloc() use mmap() to
443   allocate very large blocks.  These will be returned to the
444   operating system immediately after a free(). Also, if mmap
445   is available, it is used as a backup strategy in cases where
446   MORECORE fails to provide space from system.
447 
448   This malloc is best tuned to work with mmap for large requests.
449   If you do not have mmap, operations involving very large chunks (1MB
450   or so) may be slower than you'd like.
451 */
452 
453 #define HAVE_MMAP 0
454 
455 #ifndef HAVE_MMAP
456 #define HAVE_MMAP 1
457 #endif
458 
459 #if HAVE_MMAP
460 /*
461    Standard unix mmap using /dev/zero clears memory so calloc doesn't
462    need to.
463 */
464 
465 #ifndef MMAP_CLEARS
466 #define MMAP_CLEARS 1
467 #endif
468 
469 #else /* no mmap */
470 #ifndef MMAP_CLEARS
471 #define MMAP_CLEARS 0
472 #endif
473 #endif
474 
475 
476 /*
477    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
478    sbrk fails, and mmap is used as a backup (which is done only if
479    HAVE_MMAP).  The value must be a multiple of page size.  This
480    backup strategy generally applies only when systems have "holes" in
481    address space, so sbrk cannot perform contiguous expansion, but
482    there is still space available on system.  On systems for which
483    this is known to be useful (i.e. most linux kernels), this occurs
484    only when programs allocate huge amounts of memory.  Between this,
485    and the fact that mmap regions tend to be limited, the size should
486    be large, to avoid too many mmap calls and thus avoid running out
487    of kernel resources.
488 */
489 
490 #ifndef MMAP_AS_MORECORE_SIZE
491 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
492 #endif
493 
494 /*
495   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
496   large blocks.  This is currently only possible on Linux with
497   kernel versions newer than 1.3.77.
498 */
499 
500 #ifndef HAVE_MREMAP
501 #ifdef linux
502 #define HAVE_MREMAP 1
503 #else
504 #define HAVE_MREMAP 0
505 #endif
506 
507 #endif /* HAVE_MREMAP */
508 
509 
510 /*
511   The system page size. To the extent possible, this malloc manages
512   memory from the system in page-size units.  Note that this value is
513   cached during initialization into a field of malloc_state. So even
514   if malloc_getpagesize is a function, it is only called once.
515 
516   The following mechanics for getpagesize were adapted from bsd/gnu
517   getpagesize.h. If none of the system-probes here apply, a value of
518   4096 is used, which should be OK: If they don't apply, then using
519   the actual value probably doesn't impact performance.
520 */
521 
522 #ifndef malloc_getpagesize
523 
524 #ifndef LACKS_UNISTD_H
525 #  include <unistd.h>
526 #endif
527 
528 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
529 #    ifndef _SC_PAGE_SIZE
530 #      define _SC_PAGE_SIZE _SC_PAGESIZE
531 #    endif
532 #  endif
533 
534 #  ifdef _SC_PAGE_SIZE
535 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
536 #  else
537 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
538        extern size_t getpagesize();
539 #      define malloc_getpagesize getpagesize()
540 #    else
541 #      ifdef WIN32 /* use supplied emulation of getpagesize */
542 #        define malloc_getpagesize getpagesize()
543 #      else
544 #        ifndef LACKS_SYS_PARAM_H
545 #          include <sys/param.h>
546 #        endif
547 #        ifdef EXEC_PAGESIZE
548 #          define malloc_getpagesize EXEC_PAGESIZE
549 #        else
550 #          ifdef NBPG
551 #            ifndef CLSIZE
552 #              define malloc_getpagesize NBPG
553 #            else
554 #              define malloc_getpagesize (NBPG * CLSIZE)
555 #            endif
556 #          else
557 #            ifdef NBPC
558 #              define malloc_getpagesize NBPC
559 #            else
560 #              ifdef PAGESIZE
561 #                define malloc_getpagesize PAGESIZE
562 #              else /* just guess */
563 #                define malloc_getpagesize (4096)
564 #              endif
565 #            endif
566 #          endif
567 #        endif
568 #      endif
569 #    endif
570 #  endif
571 #endif
572 
573 /*
574   This version of malloc supports the standard SVID/XPG mallinfo
575   routine that returns a struct containing usage properties and
576   statistics. It should work on any SVID/XPG compliant system that has
577   a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
578   install such a thing yourself, cut out the preliminary declarations
579   as described above and below and save them in a malloc.h file. But
580   there's no compelling reason to bother to do this.)
581 
582   The main declaration needed is the mallinfo struct that is returned
583   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
584   bunch of fields that are not even meaningful in this version of
585   malloc.  These fields are instead filled by mallinfo() with
586   other numbers that might be of interest.
587 
588   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
589   /usr/include/malloc.h file that includes a declaration of struct
590   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
591   version is declared below.  These must be precisely the same for
592   mallinfo() to work.  The original SVID version of this struct,
593   defined on most systems with mallinfo, declares all fields as
594   ints. But some others define them as unsigned long. If your system
595   defines the fields using a type of different width than listed here,
596   you must #include your system version and #define
597   HAVE_USR_INCLUDE_MALLOC_H.
598 */
599 
600 /* #define HAVE_USR_INCLUDE_MALLOC_H */
601 
602 #ifdef HAVE_USR_INCLUDE_MALLOC_H
603 #include "/usr/include/malloc.h"
604 #else
605 
606 /* SVID2/XPG mallinfo structure */
607 
608 struct mallinfo {
609   int arena;    /* non-mmapped space allocated from system */
610   int ordblks;  /* number of free chunks */
611   int smblks;   /* number of fastbin blocks */
612   int hblks;    /* number of mmapped regions */
613   int hblkhd;   /* space in mmapped regions */
614   int usmblks;  /* maximum total allocated space */
615   int fsmblks;  /* space available in freed fastbin blocks */
616   int uordblks; /* total allocated space */
617   int fordblks; /* total free space */
618   int keepcost; /* top-most, releasable (via malloc_trim) space */
619 };
620 
621 /*
622   SVID/XPG defines four standard parameter numbers for mallopt,
623   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
624   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
625   so setting them has no effect. But this malloc also supports other
626   options in mallopt described below.
627 */
628 #endif
629 
630 
631 /* ---------- description of public routines ------------ */
632 
633 /*
634   malloc(size_t n)
635   Returns a pointer to a newly allocated chunk of at least n bytes, or null
636   if no space is available. Additionally, on failure, errno is
637   set to ENOMEM on ANSI C systems.
638 
639   If n is zero, malloc returns a minimum-sized chunk. (The minimum
640   size is 16 bytes on most 32-bit systems, and 24 or 32 bytes on 64-bit
641   systems.)  On most systems, size_t is an unsigned type, so calls
642   with negative arguments are interpreted as requests for huge amounts
643   of space, which will often fail. The maximum supported value of n
644   differs across systems, but is in all cases less than the maximum
645   representable value of a size_t.
646 */
647 Void_t*  public_mALLOc(size_t);
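
/*
  Example (illustrative only; n and p are placeholders): a caller relying
  on the documented failure behavior -- a null return with errno set to
  ENOMEM on ANSI C systems.

    void* p = malloc(n);
    if (p == 0) {
      // allocation failed; errno == ENOMEM -- recover or report and exit
    }
*/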
648 
649 /*
650   free(Void_t* p)
651   Releases the chunk of memory pointed to by p, that had been previously
652   allocated using malloc or a related routine such as realloc.
653   It has no effect if p is null. It can have arbitrary (i.e., bad!)
654   effects if p has already been freed.
655 
656   Unless disabled (using mallopt), freeing very large spaces will,
657   when possible, automatically trigger operations that give
658   back unused memory to the system, thus reducing program footprint.
659 */
660 void     public_fREe(Void_t*);
661 
662 /*
663   calloc(size_t n_elements, size_t element_size);
664   Returns a pointer to n_elements * element_size bytes, with all locations
665   set to zero.
666 */
667 Void_t*  public_cALLOc(size_t, size_t);
668 
669 /*
670   realloc(Void_t* p, size_t n)
671   Returns a pointer to a chunk of size n that contains the same data
672   as does chunk p up to the minimum of (n, p's size) bytes, or null
673   if no space is available.
674 
675   The returned pointer may or may not be the same as p. The algorithm
676   prefers extending p when possible, otherwise it employs the
677   equivalent of a malloc-copy-free sequence.
678 
679   If p is null, realloc is equivalent to malloc.
680 
681   If space is not available, realloc returns null, errno is set (if on
682   ANSI) and p is NOT freed.
683 
684   If n is for fewer bytes than already held by p, the newly unused
685   space is lopped off and freed if possible.  Unless the #define
686   REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
687   zero (re)allocates a minimum-sized chunk.
688 
689   Large chunks that were internally obtained via mmap will always
690   be reallocated using malloc-copy-free sequences unless
691   the system supports MREMAP (currently only linux).
692 
693   The old unix realloc convention of allowing the last-free'd chunk
694   to be used as an argument to realloc is not supported.
695 */
696 
697 Void_t*  public_rEALLOc(Void_t*, size_t);
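
/*
  Example (illustrative only): because a failed realloc returns null but
  does NOT free p, assigning through a temporary avoids losing the only
  pointer to the original block.

    void* q = realloc(p, new_size);
    if (q != 0)
      p = q;          // success: p now refers to the resized block
    else {
      // failure: p is still valid and must eventually be freed
    }
*/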
698 
699 /*
700   memalign(size_t alignment, size_t n);
701   Returns a pointer to a newly allocated chunk of n bytes, aligned
702   in accord with the alignment argument.
703 
704   The alignment argument should be a power of two. If the argument is
705   not a power of two, the nearest greater power is used.
706   8-byte alignment is guaranteed by normal malloc calls, so don't
707   bother calling memalign with an argument of 8 or less.
708 
709   Overreliance on memalign is a sure way to fragment space.
710 */
711 Void_t*  public_mEMALIGn(size_t, size_t);
712 
713 /*
714   valloc(size_t n);
715   Equivalent to memalign(pagesize, n), where pagesize is the page
716   size of the system. If the pagesize is unknown, 4096 is used.
717 */
718 Void_t*  public_vALLOc(size_t);
719 
720 
721 /*
722   mallopt(int parameter_number, int parameter_value)
723   Sets tunable parameters. The format is to provide a
724   (parameter-number, parameter-value) pair.  mallopt then sets the
725   corresponding parameter to the argument value if it can (i.e., so
726   long as the value is meaningful), and returns 1 if successful else
727   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
728   normally defined in malloc.h.  Only one of these (M_MXFAST) is used
729   in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
730   so setting them has no effect. But this malloc also supports four
731   other options in mallopt. See below for details.  Briefly, supported
732   parameters are as follows (listed defaults are for "typical"
733   configurations).
734 
735   Symbol            param #   default    allowed param values
736   M_MXFAST          1         64         0-64  (0 disables fastbins)
737   M_TRIM_THRESHOLD -1         256*1024   any   (-1U disables trimming)
738   M_TOP_PAD        -2         0          any
739   M_MMAP_THRESHOLD -3         256*1024   any   (or 0 if no MMAP support)
740   M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
741 */
742 int      public_mALLOPt(int, int);
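
/*
  Example (illustrative only), using the parameters tabulated above:

    mallopt(M_MXFAST, 0);                  // disable fastbins entirely
    mallopt(M_TRIM_THRESHOLD, 128*1024);   // trim more aggressively
    mallopt(M_MMAP_MAX, 0);                // never use mmap for requests
*/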
743 
744 
745 /*
746   mallinfo()
747   Returns (by copy) a struct containing various summary statistics:
748 
749   arena:     current total non-mmapped bytes allocated from system
750   ordblks:   the number of free chunks
751   smblks:    the number of fastbin blocks (i.e., small chunks that
752                have been freed but not yet reused or consolidated)
753   hblks:     current number of mmapped regions
754   hblkhd:    total bytes held in mmapped regions
755   usmblks:   the maximum total allocated space. This will be greater
756                 than current total if trimming has occurred.
757   fsmblks:   total bytes held in fastbin blocks
758   uordblks:  current total allocated space (normal or mmapped)
759   fordblks:  total free space
760   keepcost:  the maximum number of bytes that could ideally be released
761                back to system via malloc_trim. ("ideally" means that
762                it ignores page restrictions etc.)
763 
764   Because these fields are ints, but internal bookkeeping may
765   be kept as longs, the reported values may wrap around zero and
766   thus be inaccurate.
767 */
768 struct mallinfo public_mALLINFo(void);
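
/*
  Example (illustrative only): sampling a few of the fields described
  above.  The fields are ints, as noted, so plain %d formatting applies.

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "allocated: %d  free: %d  mmapped: %d\n",
            mi.uordblks, mi.fordblks, mi.hblkhd);
*/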
769 
770 /*
771   independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
772 
773   independent_calloc is similar to calloc, but instead of returning a
774   single cleared space, it returns an array of pointers to n_elements
775   independent elements that can hold contents of size elem_size, each
776   of which starts out cleared, and can be independently freed,
777   realloc'ed etc. The elements are guaranteed to be adjacently
778   allocated (this is not guaranteed to occur with multiple callocs or
779   mallocs), which may also improve cache locality in some
780   applications.
781 
782   The "chunks" argument is optional (i.e., may be null, which is
783   probably the most typical usage). If it is null, the returned array
784   is itself dynamically allocated and should also be freed when it is
785   no longer needed. Otherwise, the chunks array must be of at least
786   n_elements in length. It is filled in with the pointers to the
787   chunks.
788 
789   In either case, independent_calloc returns this pointer array, or
790   null if the allocation failed.  If n_elements is zero and "chunks"
791   is null, it returns a chunk representing an array with zero elements
792   (which should be freed if not wanted).
793 
794   Each element must be individually freed when it is no longer
795   needed. If you'd like to instead be able to free all at once, you
796   should instead use regular calloc and assign pointers into this
797   space to represent elements.  (In this case though, you cannot
798   independently free elements.)
799 
800   independent_calloc simplifies and speeds up implementations of many
801   kinds of pools.  It may also be useful when constructing large data
802   structures that initially have a fixed number of fixed-sized nodes,
803   but the number is not known at compile time, and some of the nodes
804   may later need to be freed. For example:
805 
806   struct Node { int item; struct Node* next; };
807 
808   struct Node* build_list() {
809     struct Node** pool;
810     int n = read_number_of_nodes_needed();
811     if (n <= 0) return 0;
812     pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
813     if (pool == 0) die();
814     // organize into a linked list...
815     struct Node* first = pool[0];
816     for (int i = 0; i < n-1; ++i)
817       pool[i]->next = pool[i+1];
818     free(pool);     // Can now free the array (or not, if it is needed later)
819     return first;
820   }
821 */
822 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
823 
824 /*
825   independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
826 
827   independent_comalloc allocates, all at once, a set of n_elements
828   chunks with sizes indicated in the "sizes" array.    It returns
829   an array of pointers to these elements, each of which can be
830   independently freed, realloc'ed etc. The elements are guaranteed to
831   be adjacently allocated (this is not guaranteed to occur with
832   multiple callocs or mallocs), which may also improve cache locality
833   in some applications.
834 
835   The "chunks" argument is optional (i.e., may be null). If it is null
836   the returned array is itself dynamically allocated and should also
837   be freed when it is no longer needed. Otherwise, the chunks array
838   must be of at least n_elements in length. It is filled in with the
839   pointers to the chunks.
840 
841   In either case, independent_comalloc returns this pointer array, or
842   null if the allocation failed.  If n_elements is zero and chunks is
843   null, it returns a chunk representing an array with zero elements
844   (which should be freed if not wanted).
845 
846   Each element must be individually freed when it is no longer
847   needed. If you'd like to instead be able to free all at once, you
848   should instead use a single regular malloc, and assign pointers at
849   particular offsets in the aggregate space. (In this case though, you
850   cannot independently free elements.)
851 
852   independent_comalloc differs from independent_calloc in that each
853   element may have a different size, and also that it does not
854   automatically clear elements.
855 
856   independent_comalloc can be used to speed up allocation in cases
857   where several structs or objects must always be allocated at the
858   same time.  For example:
859 
860   struct Head { ... };
861   struct Foot { ... };
862 
863   void send_message(char* msg) {
864     int msglen = strlen(msg);
865     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
866     void* chunks[3];
867     if (independent_comalloc(3, sizes, chunks) == 0)
868       die();
869     struct Head* head = (struct Head*)(chunks[0]);
870     char*        body = (char*)(chunks[1]);
871     struct Foot* foot = (struct Foot*)(chunks[2]);
872     // ...
873   }
874 
875   In general though, independent_comalloc is worth using only for
876   larger values of n_elements. For small values, you probably won't
877   detect enough difference from a series of malloc calls to bother.
878 
879   Overuse of independent_comalloc can increase overall memory usage,
880   since it cannot reuse existing noncontiguous small chunks that
881   might be available for some of the elements.
882 */
883 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
884 
885 
886 /*
887   pvalloc(size_t n);
888   Equivalent to valloc(minimum-page-that-holds(n)), that is,
889   round up n to nearest pagesize.
890  */
891 Void_t*  public_pVALLOc(size_t);
892 
893 /*
894   cfree(Void_t* p);
895   Equivalent to free(p).
896 
897   cfree is needed/defined on some systems that pair it with calloc,
898   for odd historical reasons (such as: cfree is used in example
899   code in the first edition of K&R).
900 */
901 void     public_cFREe(Void_t*);
902 
903 /*
904   malloc_trim(size_t pad);
905 
906   If possible, gives memory back to the system (via negative
907   arguments to sbrk) if there is unused memory at the `high' end of
908   the malloc pool. You can call this after freeing large blocks of
909   memory to potentially reduce the system-level memory requirements
910   of a program. However, it cannot guarantee to reduce memory. Under
911   some allocation patterns, some large free blocks of memory will be
912   locked between two used chunks, so they cannot be given back to
913   the system.
914 
915   The `pad' argument to malloc_trim represents the amount of free
916   trailing space to leave untrimmed. If this argument is zero,
917   only the minimum amount of memory to maintain internal data
918   structures will be left (one page or less). Non-zero arguments
919   can be supplied to maintain enough trailing space to service
920   future expected allocations without having to re-obtain memory
921   from the system.
922 
923   Malloc_trim returns 1 if it actually released any memory, else 0.
924   On systems that do not support "negative sbrks", it will always
925   return 0.
926 */
927 int      public_mTRIm(size_t);
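
/*
  Example (illustrative only; release_all_cached_buffers is a hypothetical
  application routine): after freeing many large blocks, a long-lived
  program can ask that trailing unused memory be returned to the system.

    release_all_cached_buffers();
    if (malloc_trim(0)) {
      // some memory was actually given back to the system
    }
*/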
928 
929 /*
930   malloc_usable_size(Void_t* p);
931 
932   Returns the number of bytes you can actually use in
933   an allocated chunk, which may be more than you requested (although
934   often not) due to alignment and minimum size constraints.
935   You can use this many bytes without worrying about
936   overwriting other allocated objects. This is not a particularly great
937   programming practice. malloc_usable_size can be more useful in
938   debugging and assertions, for example:
939 
940   p = malloc(n);
941   assert(malloc_usable_size(p) >= 256);
942 
943 */
944 size_t   public_mUSABLe(Void_t*);
945 
946 /*
947   malloc_stats();
948   Prints on stderr the amount of space obtained from the system (both
949   via sbrk and mmap), the maximum amount (which may be more than
950   current if malloc_trim and/or munmap got called), and the current
951   number of bytes allocated via malloc (or realloc, etc) but not yet
952   freed. Note that this is the number of bytes allocated, not the
953   number requested. It will be larger than the number requested
954   because of alignment and bookkeeping overhead. Because it includes
955   alignment wastage as being in use, this figure may be greater than
956   zero even when no user-level chunks are allocated.
957 
958   The reported current and maximum system memory can be inaccurate if
959   a program makes other calls to system memory allocation functions
960   (normally sbrk) outside of malloc.
961 
962   malloc_stats prints only the most commonly interesting statistics.
963   More information can be obtained by calling mallinfo.
964 
965 */
966 void     public_mSTATs(void);
967 
968 /* mallopt tuning options */
969 
970 /*
971   M_MXFAST is the maximum request size used for "fastbins", special bins
972   that hold returned chunks without consolidating their spaces. This
973   enables future requests for chunks of the same size to be handled
974   very quickly, but can increase fragmentation, and thus increase the
975   overall memory footprint of a program.
976 
977   This malloc manages fastbins very conservatively yet still
978   efficiently, so fragmentation is rarely a problem for values less
979   than or equal to the default.  The maximum supported value of MXFAST
980   is 64 (also the default). You wouldn't want it any higher than this
981   anyway.  Fastbins are designed especially for use with many small
982   structs, objects or strings -- the default handles
983   structs/objects/arrays with sizes up to sixteen 4-byte fields, or small
984   strings representing words, tokens, etc. Using fastbins for larger
985   objects normally worsens fragmentation without improving speed.
986 
987   M_MXFAST is set in REQUEST size units. It is internally used in
988   chunksize units, which adds padding and alignment.  You can reduce
989   M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
990   algorithm to be a closer approximation of fifo-best-fit in all cases,
991   not just for larger requests, but will generally cause it to be
992   slower.
993 */
994 
995 
996 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
997 #ifndef M_MXFAST
998 #define M_MXFAST            1
999 #endif
1000 
1001 #ifndef DEFAULT_MXFAST
1002 #define DEFAULT_MXFAST     64
1003 #endif
1004 
1005 
1006 /*
1007   M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1008   to keep before releasing via malloc_trim in free().
1009 
1010   Automatic trimming is mainly useful in long-lived programs.
1011   Because trimming via sbrk can be slow on some systems, and can
1012   sometimes be wasteful (in cases where programs immediately
1013   afterward allocate more large chunks) the value should be high
1014   enough so that your overall system performance would improve by
1015   releasing this much memory.
1016 
1017   The trim threshold and the mmap control parameters (see below)
1018   can be traded off with one another. Trimming and mmapping are
1019   two different ways of releasing unused memory back to the
1020   system. Between these two, it is often possible to keep
1021   system-level demands of a long-lived program down to a bare
1022   minimum. For example, in one test suite of sessions measuring
1023   the XF86 X server on Linux, using a trim threshold of 128K and a
1024   mmap threshold of 192K led to near-minimal long term resource
1025   consumption.
1026 
1027   If you are using this malloc in a long-lived program, it should
1028   pay to experiment with these values.  As a rough guide, you
1029   might set to a value close to the average size of a process
1030   (program) running on your system.  Releasing this much memory
1031   would allow such a process to run in memory.  Generally, it's
1032   worth it to tune for trimming rather than memory mapping when a
1033   program undergoes phases where several large chunks are
1034   allocated and released in ways that can reuse each other's
1035   storage, perhaps mixed with phases where there are no such
1036   chunks at all.  And in well-behaved long-lived programs,
1037   controlling release of large blocks via trimming versus mapping
1038   is usually faster.
1039 
1040   However, in most programs, these parameters serve mainly as
1041   protection against the system-level effects of carrying around
1042   massive amounts of unneeded memory. Since frequent calls to
1043   sbrk, mmap, and munmap otherwise degrade performance, the default
1044   parameters are set to relatively high values that serve only as
1045   safeguards.
1046 
1047   The trim value must be greater than page size to have any useful
1048   effect.  To disable trimming completely, you can set to
1049   (unsigned long)(-1)
1050 
1051   Trim settings interact with fastbin (MXFAST) settings: Unless
1052   TRIM_FASTBINS is defined, automatic trimming never takes place upon
1053   freeing a chunk with size less than or equal to MXFAST. Trimming is
1054   instead delayed until subsequent freeing of larger chunks. However,
1055   you can still force an attempted trim by calling malloc_trim.
1056 
1057   Also, trimming is not generally possible in cases where
1058   the main arena is obtained via mmap.
1059 
1060   Note that the trick some people use of mallocing a huge space and
1061   then freeing it at program startup, in an attempt to reserve system
1062   memory, doesn't have the intended effect under automatic trimming,
1063   since that memory will immediately be returned to the system.
1064 */
1065 
1066 #define M_TRIM_THRESHOLD       -1
1067 
1068 #ifndef DEFAULT_TRIM_THRESHOLD
1069 
1070 #define DEFAULT_TRIM_THRESHOLD (-1U)
1071 /* #define DEFAULT_TRIM_THRESHOLD (256 * 1024) */
1072 #endif
1073 
1074 /*
1075   M_TOP_PAD is the amount of extra `padding' space to allocate or
1076   retain whenever sbrk is called. It is used in two ways internally:
1077 
1078   * When sbrk is called to extend the top of the arena to satisfy
1079   a new malloc request, this much padding is added to the sbrk
1080   request.
1081 
1082   * When malloc_trim is called automatically from free(),
1083   it is used as the `pad' argument.
1084 
1085   In both cases, the actual amount of padding is rounded
1086   so that the end of the arena is always a system page boundary.
1087 
1088   The main reason for using padding is to avoid calling sbrk so
1089   often. Having even a small pad greatly reduces the likelihood
1090   that nearly every malloc request during program start-up (or
1091   after trimming) will invoke sbrk, which needlessly wastes
1092   time.
1093 
1094   Automatic rounding-up to page-size units is normally sufficient
1095   to avoid measurable overhead, so the default is 0.  However, in
1096   systems where sbrk is relatively slow, it can pay to increase
1097   this value, at the expense of carrying around more memory than
1098   the program needs.
1099 */
1100 
1101 #define M_TOP_PAD              -2
1102 
1103 #ifndef DEFAULT_TOP_PAD
1104 #define DEFAULT_TOP_PAD        (0)
1105 #endif
1106 
1107 /*
1108   M_MMAP_THRESHOLD is the request size threshold for using mmap()
1109   to service a request. Requests of at least this size that cannot
1110   be allocated using already-existing space will be serviced via mmap.
1111   (If enough normal freed space already exists it is used instead.)
1112 
1113   Using mmap segregates relatively large chunks of memory so that
1114   they can be individually obtained and released from the host
1115   system. A request serviced through mmap is never reused by any
1116   other request (at least not directly; the system may just so
1117   happen to remap successive requests to the same locations).
1118 
1119   Segregating space in this way has the benefits that:
1120 
1121    1. Mmapped space can ALWAYS be individually released back
1122       to the system, which helps keep the system level memory
1123       demands of a long-lived program low.
1124    2. Mapped memory can never become `locked' between
1125       other chunks, as can happen with normally allocated chunks, which
1126       means that even trimming via malloc_trim would not release them.
1127    3. On some systems with "holes" in address spaces, mmap can obtain
1128       memory that sbrk cannot.
1129 
1130   However, it has the disadvantages that:
1131 
1132    1. The space cannot be reclaimed, consolidated, and then
1133       used to service later requests, as happens with normal chunks.
1134    2. It can lead to more wastage because of mmap page alignment
1135       requirements
1136    3. It causes malloc performance to be more dependent on host
1137       system memory management support routines which may vary in
1138       implementation quality and may impose arbitrary
1139       limitations. Generally, servicing a request via normal
1140       malloc steps is faster than going through a system's mmap.
1141 
1142   The advantages of mmap nearly always outweigh disadvantages for
1143   "large" chunks, but the value of "large" varies across systems.  The
1144   default is an empirically derived value that works well in most
1145   systems.
1146 */
1147 
1148 #define M_MMAP_THRESHOLD      -3
1149 
1150 #ifndef DEFAULT_MMAP_THRESHOLD
1151 #define DEFAULT_MMAP_THRESHOLD (-1U)
1152 /*#define DEFAULT_MMAP_THRESHOLD (128 * 1024) */
1153 #endif
1154 
1155 /*
1156   M_MMAP_MAX is the maximum number of requests to simultaneously
1157   service using mmap. This parameter exists because some
1158   systems have a limited number of internal tables for use
1159   by mmap, and using more than a few of them may degrade
1160   performance.
1161 
1162   The default is set to a value that serves only as a safeguard.
1163   Setting to 0 disables use of mmap for servicing large requests.  If
1164   HAVE_MMAP is not set, the default value is 0, and attempts to set it
1165   to non-zero values in mallopt will fail.
1166 */
1167 
1168 #define M_MMAP_MAX             -4
1169 
1170 #ifndef DEFAULT_MMAP_MAX
1171 #if HAVE_MMAP
1172 #define DEFAULT_MMAP_MAX       (65536)
1173 #else
1174 #define DEFAULT_MMAP_MAX       (0)
1175 #endif
1176 #endif
1177 
1178 #ifdef __cplusplus
1179 };  /* end of extern "C" */
1180 #endif
1181 
1182 /*
1183   ========================================================================
1184   To make a fully customizable malloc.h header file, cut everything
1185   above this line, put into file malloc.h, edit to suit, and #include it
1186   on the next line, as well as in programs that use this malloc.
1187   ========================================================================
1188 */
1189 
1190 /* #include "malloc.h" */
1191 
1192 /* --------------------- public wrappers ---------------------- */
1193 
1194 #ifdef USE_PUBLIC_MALLOC_WRAPPERS
1195 
1196 /* Declare all routines as internal */
1197 static Void_t*  mALLOc(size_t);
1198 static void     fREe(Void_t*);
1199 static Void_t*  rEALLOc(Void_t*, size_t);
1200 static Void_t*  mEMALIGn(size_t, size_t);
1201 static Void_t*  vALLOc(size_t);
1202 static Void_t*  pVALLOc(size_t);
1203 static Void_t*  cALLOc(size_t, size_t);
1204 static Void_t** iCALLOc(size_t, size_t, Void_t**);
1205 static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
1206 static void     cFREe(Void_t*);
1207 static int      mTRIm(size_t);
1208 static size_t   mUSABLe(Void_t*);
1209 static void     mSTATs();
1210 static int      mALLOPt(int, int);
1211 static struct mallinfo mALLINFo(void);
1212 
1213 /*
1214   MALLOC_PREACTION and MALLOC_POSTACTION should be
1215   defined to return 0 on success, and nonzero on failure.
1216   The return value of MALLOC_POSTACTION is currently ignored
1217   in wrapper functions since there is no reasonable default
1218   action to take on failure.
1219 */
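
/*
  Example (sketch, separate from the locking support below; count_call and
  malloc_call_count are hypothetical names): an instrumentation hook that
  honors the contract of returning 0 on success.

    static long malloc_call_count = 0;
    static int count_call(void) { ++malloc_call_count; return 0; }

    #define MALLOC_PREACTION   count_call()
    #define MALLOC_POSTACTION  (0)
*/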
1220 
1221 
1222 #ifdef USE_MALLOC_LOCK
1223 
1224 #ifdef WIN32
1225 
1226 static int mALLOC_MUTEx;
1227 #define MALLOC_PREACTION   slwait(&mALLOC_MUTEx)
1228 #define MALLOC_POSTACTION  slrelease(&mALLOC_MUTEx)
1229 
1230 #else
1231 
1232 #include <pthread.h>
1233 
1234 static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1235 
1236 #define MALLOC_PREACTION   pthread_mutex_lock(&mALLOC_MUTEx)
1237 #define MALLOC_POSTACTION  pthread_mutex_unlock(&mALLOC_MUTEx)
1238 
1239 #endif /* WIN32 */
1240 
1241 #else
1242 
1243 /* Substitute anything you like for these */
1244 
1245 #define MALLOC_PREACTION   (0)
1246 #define MALLOC_POSTACTION  (0)
1247 
1248 #endif
1249 
1250 Void_t* public_mALLOc(size_t bytes) {
1251   Void_t* m;
1252   if (MALLOC_PREACTION != 0) {
1253     return 0;
1254   }
1255   m = mALLOc(bytes);
1256   if (MALLOC_POSTACTION != 0) {
1257   }
1258   return m;
1259 }
1260 
1261 void public_fREe(Void_t* m) {
1262   if (MALLOC_PREACTION != 0) {
1263     return;
1264   }
1265   fREe(m);
1266   if (MALLOC_POSTACTION != 0) {
1267   }
1268 }
1269 
1270 Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1271   if (MALLOC_PREACTION != 0) {
1272     return 0;
1273   }
1274   m = rEALLOc(m, bytes);
1275   if (MALLOC_POSTACTION != 0) {
1276   }
1277   return m;
1278 }
1279 
1280 Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1281   Void_t* m;
1282   if (MALLOC_PREACTION != 0) {
1283     return 0;
1284   }
1285   m = mEMALIGn(alignment, bytes);
1286   if (MALLOC_POSTACTION != 0) {
1287   }
1288   return m;
1289 }
1290 
1291 Void_t* public_vALLOc(size_t bytes) {
1292   Void_t* m;
1293   if (MALLOC_PREACTION != 0) {
1294     return 0;
1295   }
1296   m = vALLOc(bytes);
1297   if (MALLOC_POSTACTION != 0) {
1298   }
1299   return m;
1300 }
1301 
1302 Void_t* public_pVALLOc(size_t bytes) {
1303   Void_t* m;
1304   if (MALLOC_PREACTION != 0) {
1305     return 0;
1306   }
1307   m = pVALLOc(bytes);
1308   if (MALLOC_POSTACTION != 0) {
1309   }
1310   return m;
1311 }
1312 
1313 Void_t* public_cALLOc(size_t n, size_t elem_size) {
1314   Void_t* m;
1315   if (MALLOC_PREACTION != 0) {
1316     return 0;
1317   }
1318   m = cALLOc(n, elem_size);
1319   if (MALLOC_POSTACTION != 0) {
1320   }
1321   return m;
1322 }
1323 
1324 
1325 Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
1326   Void_t** m;
1327   if (MALLOC_PREACTION != 0) {
1328     return 0;
1329   }
1330   m = iCALLOc(n, elem_size, chunks);
1331   if (MALLOC_POSTACTION != 0) {
1332   }
1333   return m;
1334 }
1335 
1336 Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
1337   Void_t** m;
1338   if (MALLOC_PREACTION != 0) {
1339     return 0;
1340   }
1341   m = iCOMALLOc(n, sizes, chunks);
1342   if (MALLOC_POSTACTION != 0) {
1343   }
1344   return m;
1345 }
1346 
1347 void public_cFREe(Void_t* m) {
1348   if (MALLOC_PREACTION != 0) {
1349     return;
1350   }
1351   cFREe(m);
1352   if (MALLOC_POSTACTION != 0) {
1353   }
1354 }
1355 
1356 int public_mTRIm(size_t s) {
1357   int result;
1358   if (MALLOC_PREACTION != 0) {
1359     return 0;
1360   }
1361   result = mTRIm(s);
1362   if (MALLOC_POSTACTION != 0) {
1363   }
1364   return result;
1365 }
1366 
1367 size_t public_mUSABLe(Void_t* m) {
1368   size_t result;
1369   if (MALLOC_PREACTION != 0) {
1370     return 0;
1371   }
1372   result = mUSABLe(m);
1373   if (MALLOC_POSTACTION != 0) {
1374   }
1375   return result;
1376 }
1377 
1378 void public_mSTATs() {
1379   if (MALLOC_PREACTION != 0) {
1380     return;
1381   }
1382   mSTATs();
1383   if (MALLOC_POSTACTION != 0) {
1384   }
1385 }
1386 
1387 struct mallinfo public_mALLINFo() {
1388   struct mallinfo m;
1389   if (MALLOC_PREACTION != 0) {
1390     struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1391     return nm;
1392   }
1393   m = mALLINFo();
1394   if (MALLOC_POSTACTION != 0) {
1395   }
1396   return m;
1397 }
1398 
1399 int public_mALLOPt(int p, int v) {
1400   int result;
1401   if (MALLOC_PREACTION != 0) {
1402     return 0;
1403   }
1404   result = mALLOPt(p, v);
1405   if (MALLOC_POSTACTION != 0) {
1406   }
1407   return result;
1408 }
1409 
1410 #endif
1411 
1412 
1413 
1414 /* ------------- Optional versions of memcopy ---------------- */
1415 
1416 
1417 #if USE_MEMCPY
1418 
1419 /*
1420   Note: memcpy is ONLY invoked with non-overlapping regions,
1421   so the (usually slower) memmove is not needed.
1422 */
1423 
1424 #define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
1425 #define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
1426 
1427 #else /* !USE_MEMCPY */
1428 
1429 /* Use Duff's device for good zeroing/copying performance. */
1430 
1431 #define MALLOC_ZERO(charp, nbytes)                                          \
1432 do {                                                                        \
1433   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                         \
1434   CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                   \
1435   long mcn;                                                                 \
1436   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }           \
1437   switch (mctmp) {                                                          \
1438     case 0: for(;;) { *mzp++ = 0;                                           \
1439     case 7:           *mzp++ = 0;                                           \
1440     case 6:           *mzp++ = 0;                                           \
1441     case 5:           *mzp++ = 0;                                           \
1442     case 4:           *mzp++ = 0;                                           \
1443     case 3:           *mzp++ = 0;                                           \
1444     case 2:           *mzp++ = 0;                                           \
1445     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }              \
1446   }                                                                         \
1447 } while(0)
1448 
1449 #define MALLOC_COPY(dest,src,nbytes)                                        \
1450 do {                                                                        \
1451   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                          \
1452   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                         \
1453   CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                   \
1454   long mcn;                                                                 \
1455   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }           \
1456   switch (mctmp) {                                                          \
1457     case 0: for(;;) { *mcdst++ = *mcsrc++;                                  \
1458     case 7:           *mcdst++ = *mcsrc++;                                  \
1459     case 6:           *mcdst++ = *mcsrc++;                                  \
1460     case 5:           *mcdst++ = *mcsrc++;                                  \
1461     case 4:           *mcdst++ = *mcsrc++;                                  \
1462     case 3:           *mcdst++ = *mcsrc++;                                  \
1463     case 2:           *mcdst++ = *mcsrc++;                                  \
1464     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }     \
1465   }                                                                         \
1466 } while(0)
1467 
1468 #endif
1469 
1470 /* ------------------ MMAP support ------------------  */
1471 
1472 
1473 #if HAVE_MMAP
1474 
1475 #include <fcntl.h>
1476 #ifndef LACKS_SYS_MMAN_H
1477 #include <sys/mman.h>
1478 #endif
1479 
1480 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1481 #define MAP_ANONYMOUS MAP_ANON
1482 #endif
1483 
1484 /*
1485    Nearly all versions of mmap support MAP_ANONYMOUS,
1486    so the following is unlikely to be needed, but is
1487    supplied just in case.
1488 */
1489 
1490 #ifndef MAP_ANONYMOUS
1491 
1492 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1493 
1494 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1495  (dev_zero_fd = open("/dev/zero", O_RDWR), \
1496   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1497    mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1498 
1499 #else
1500 
1501 #define MMAP(addr, size, prot, flags) \
1502  (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1503 
1504 #endif
1505 
1506 
1507 #endif /* HAVE_MMAP */
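
/*
  Illustrative sketch of how the MMAP macro above is typically invoked
  (a read/write, private mapping), with failure signalled by the same
  value as MORECORE_FAILURE, as noted earlier:

    char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
    if (mm != (char*)(MORECORE_FAILURE)) {
      // obtained `size` bytes of zero-filled, page-aligned memory
    }
*/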
1508 
1509 
1510 /*
1511   -----------------------  Chunk representations -----------------------
1512 */
1513 
1514 
1515 /*
1516   This struct declaration is misleading (but accurate and necessary).
1517   It declares a "view" into memory allowing access to necessary
1518   fields at known offsets from a given base. See explanation below.
1519 */
1520 
1521 struct malloc_chunk {
1522 
1523   INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
1524   INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
1525 
1526   struct malloc_chunk* fd;         /* double links -- used only if free. */
1527   struct malloc_chunk* bk;
1528 };
1529 
1530 
1531 typedef struct malloc_chunk* mchunkptr;
1532 
1533 /* conversion from malloc headers to user pointers, and back */
1534 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1535 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1536 
1537 /*
1538    malloc_chunk details:
1539 
1540     (The following includes lightly edited explanations by Colin Plumb.)
1541 
1542     Chunks of memory are maintained using a `boundary tag' method as
1543     described in e.g., Knuth or Standish.  (See the paper by Paul
1544     Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1545     survey of such techniques.)  Sizes of free chunks are stored both
1546     in the front of each chunk and at the end.  This makes
1547     consolidating fragmented chunks into bigger chunks very fast.  The
1548     size fields also hold bits representing whether chunks are free or
1549     in use.
1550 
1551     An allocated chunk looks like this:
1552 
1553 
1554     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1555             |             Size of previous chunk, if allocated            | |
1556             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1557             |             Size of chunk, in bytes                         |P|
1558       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1559             |             User data starts here...                          .
1560             .                                                               .
1561             .             (malloc_usable_space() bytes)                     .
1562             .                                                               |
1563 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1564     `foot:' |             Size of chunk                                     |
1565             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1566 
1567 
1568     Where "chunk" is the front of the chunk for the purpose of most of
1569     the malloc code, but "mem" is the pointer that is returned to the
1570     user.  "Nextchunk" is the beginning of the next contiguous chunk.
1571 
1572     Chunks always begin on even word boundaries, so the mem portion
1573     (which is returned to the user) is also on an even word boundary, and
1574     thus at least double-word aligned.
1575 
1576 
1577     The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1578     chunk size (which is always a multiple of two words), is an in-use
1579     bit for the *previous* chunk.  If that bit is *clear*, then the
1580     word before the current chunk size contains the previous chunk
1581     size, and can be used to find the front of the previous chunk.
1582     The very first chunk allocated always has this bit set,
1583     preventing access to non-existent (or non-owned) memory. If
1584     prev_inuse is set for any given chunk, then you CANNOT determine
1585     the size of the previous chunk, and might even get a memory
1586     addressing fault when trying to do so.
1587 
1588     Note that the `foot' of the current chunk is actually represented
1589     as the prev_size of the NEXT chunk. This makes it easier to
1590     deal with alignments etc but can be very confusing when trying
1591     to extend or adapt this code.
1592 
1593     The two exceptions to all this are
1594 
1595      1. The special chunk `top' doesn't bother using the
1596         trailing size field since there is no next contiguous chunk
1597         that would have to index off it. After initialization, `top'
1598         is forced to always exist.  If it would become less than
1599         MINSIZE bytes long, it is replenished.
1600 
1601      2. Chunks allocated via mmap, which have the second-lowest-order
1602         bit (IS_MMAPPED) set in their size fields.  Because they are
1603         allocated one-by-one, each must contain its own trailing size field.
1604 
1605 */
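
/*
  Illustrative sketch (not compiled): the relation between "chunk" and "mem"
  pointers and the PREV_INUSE convention described above, written in terms
  of the macros defined nearby (prev_inuse, prev_chunk and next_chunk appear
  a little further below). The example_* helper and the use of assert are
  for illustration only; with 4-byte INTERNAL_SIZE_T, mem lies 8 bytes past
  the chunk base.
*/
#if 0
static void example_chunk_view(mchunkptr p) {
  /* "mem" is what the user sees: 2*SIZE_SZ bytes past the chunk base */
  Void_t* mem = chunk2mem(p);
  assert(mem2chunk(mem) == p);

  /* the low bit of p->size is the PREV_INUSE flag, not part of the size */
  if (!prev_inuse(p)) {
    /* previous chunk is free, so its size sits just before p (boundary tag) */
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
  }
}
#endif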
1606 
1607 
1608 
1609 /*
1610   --------------- Operations on size fields ---------------
1611 */
1612 
1613 
1614 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1615 #define PREV_INUSE 0x1
1616 
1617 #if HAVE_MMAP
1618 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap */
1619 #define IS_MMAPPED 0x2
1620 #else
1621 #define IS_MMAPPED 0
1622 #endif
1623 
1624 /* extract inuse bit of previous chunk */
1625 #define prev_inuse(p)       ((p)->size & PREV_INUSE)
1626 
1627 /* check for mmap()'ed chunk */
1628 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1629 
1630 /*
1631   Bits to mask off when extracting size
1632 
1633   Note: IS_MMAPPED is intentionally not masked off from size field in
1634   macros for which mmapped chunks should never be seen. This should
1635   cause helpful core dumps to occur if it is tried by accident by
1636   people extending or adapting this malloc.
1637 */
1638 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1639 
1640 /* Get size, ignoring use bits */
1641 #define chunksize(p)         ((CHUNK_SIZE_T)((p)->size & ~(SIZE_BITS)))
1642 
1643 
1644 /* Ptr to next physical malloc_chunk. */
1645 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE)))
1646 
1647 /* Ptr to previous physical malloc_chunk */
1648 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1649 
1650 /* Treat space at ptr + offset as a chunk */
1651 #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
1652 
1653 /* extract p's inuse bit */
1654 #define inuse(p)\
1655 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1656 
1657 /* set/clear chunk as being inuse without otherwise disturbing */
1658 #define set_inuse(p)\
1659 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1660 
1661 #define clear_inuse(p)\
1662 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1663 
1664 
1665 /* check/set/clear inuse bits in known places */
1666 #define inuse_bit_at_offset(p, s)\
1667  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1668 
1669 #define inuse_addr_at_offset(p, s)\
1670  (INTERNAL_SIZE_T*)(&(((mchunkptr)(((char*)(p)) + (s)))->size))
1671 
1672 #define set_inuse_bit_at_offset(p, s)\
1673  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1674 
1675 #define clear_inuse_bit_at_offset(p, s)\
1676  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1677 
1678 
1679 /* Set size at head, without disturbing its use bit */
1680 #define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1681 
1682 /* Set size/use field */
1683 #define set_head(p, s)       ((p)->size = (s))
1684 
1685 /* Set size at footer (only when chunk is not in use) */
1686 #define set_foot(p, s)     (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1687 
1688 
1689 /*
1690   ---------- Size and alignment checks and conversions ----------
1691 */
1692 
1693 
1694 /* The smallest possible chunk */
1695 #define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))
1696 
1697 /* The smallest size we can malloc is an aligned minimal chunk */
1698 
1699 #define MINSIZE  \
1700   (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1701 
1702 /* Check if m has acceptable alignment */
1703 
1704 #define aligned_OK(m)  (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1705 
1706 
1707 /*
1708    Check if a request is so large that it would wrap around zero when
1709    padded and aligned. To simplify some other code, the bound is made
1710    low enough so that adding MINSIZE will also not wrap around zero.
1711 */
1712 
1713 #define MAX_REQUEST_SIZE ((CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1714 
1715 #define request_out_of_range(req)                                \
1716   ((CHUNK_SIZE_T)(req) >= MAX_REQUEST_SIZE)
1717 
1718 /* pad request bytes into a usable size -- internal version */
1719 
1720 #define request2size(req)                                         \
1721   (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
1722    MINSIZE :                                                      \
1723    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1724 
1725 
1726 #define pad_request(req)                                         \
1727    (((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1728 
1729 /*  Same, except also perform argument check */
1730 
1731 #define checked_request2size(req, sz)                             \
1732   if (request_out_of_range(req)) {                                \
1733     MALLOC_FAILURE_ACTION;                                        \
1734     return 0;                                                     \
1735   }                                                               \
1736   (sz) = request2size(req);
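
/*
  Worked example (not compiled): assuming 4-byte INTERNAL_SIZE_T and
  pointers, so SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16.
  The example_* helper is hypothetical and only spells out the arithmetic
  performed by the macros above.
*/
#if 0
static void example_request2size(void) {
  assert(MINSIZE == 16);
  assert(request2size(1)  == MINSIZE); /* tiny requests get a minimal chunk  */
  assert(request2size(20) == 24);      /* 20 + 4 overhead, rounded up to 24  */
  assert(request2size(24) == 32);      /* 24 + 4 = 28, rounded up to 32      */

  /* absurd requests are rejected before the padding arithmetic can wrap */
  assert(request_out_of_range((size_t)-1));
}
#endif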
1737 
1738 
1739 typedef CHUNK_SIZE_T  bin_index_t;
1740 typedef unsigned int  bitmap_t;
1741 
1742 
1743 /*
1744   ---------- Overlaid Data types ----------
1745 */
1746 
1747 
1748 /*
1749     "Small"  chunks are stored in circular doubly-linked lists, and look
1750     like this:
1751 
1752     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1753             |             Size of previous chunk                            |
1754             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1755     `head:' |             Size of chunk, in bytes                         |P|
1756       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1757             |             Forward pointer to next chunk in list             |
1758             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1759             |             Back pointer to previous chunk in list            |
1760             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1761             |             Unused space (may be 0 bytes long)                .
1762             .                                                               .
1763             .                                                               |
1764 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1765     `foot:' |             Size of chunk, in bytes                           |
1766             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1767 
1768   Larger chunks are kept in a form of bitwise digital trees (aka tries)
1769   keyed on chunksizes, in which
1770 
1771     * Viewed as trees, they contain no duplicates (i.e., no
1772       duplicate sizes). If a chunk with the same size as an existing
1773       node is inserted, it is linked off the existing node using
1774       pointers that work in the same way as the fd/bk pointers
1775       of small chunks.
1776 
1777     * The decision to go left or right when searching is based on a
1778       sliding bit, starting at the most significant bit distinguishing
1779       sizes in the tree, and sliding right each level. All left
1780       children of a node are smaller than all right children, but not
1781       necessarily smaller than the node.
1782 
1783   The worst case number of steps to add or remove a node is thus
1784   bounded by the number of bits differentiating chunks within
1785   bins. Under current bin calculations, this ranges from 6 up to 21
1786   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1787   is of course much better.
1788 
1789   Tree chunks are overlaid in the same way as small chunks. Because
1790   malloc_tree_chunks are used only for free chunks greater than 256 bytes,
1791   their size doesn't impose any constraints on user chunk sizes.
1792 
1793     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1794             |             Size of previous chunk                            |
1795             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1796     `head:' |             Size of chunk, in bytes                         |P|
1797       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1798             |             Forward pointer to next chunk of same size        |
1799             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1800             |             Back pointer to previous chunk of same size       |
1801             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1802             |             Pointer to left child (child[0])                  |
1803             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1804             |             Pointer to right child (child[1])                 |
1805             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1806             |             Pointer to parent                                 |
1807             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1808             |             bin index of this chunk                           |
1809             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1810             |             Unused space                                      .
1811             .                                                               |
1812 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1813     `foot:' |             Size of chunk, in bytes                           |
1814             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1815 
1816 */
1817 
1818 
1819 struct malloc_tree_chunk {
1820   INTERNAL_SIZE_T      prev_size;           /* same as in malloc_chunk */
1821   INTERNAL_SIZE_T      size;
1822 
1823   struct malloc_tree_chunk* fd;    /* double links -- used only if free. */
1824   struct malloc_tree_chunk* bk;
1825 
1826   struct malloc_tree_chunk* child[2];
1827   struct malloc_tree_chunk* parent;
1828 
1829   bin_index_t index;
1830 };
1831 
1832 typedef struct malloc_tree_chunk* tchunkptr;
1833 
1834 typedef struct malloc_tree_chunk* tbinptr;
1835 
1836 
1837 
1838 
1839 /*
1840   -------------------- Internal data structures --------------------
1841 
1842    All internal state is held in an instance of malloc_state defined
1843    below. There are no other static variables, except in two optional
1844    cases:
1845    * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1846    * If HAVE_MMAP is true, but mmap doesn't support
1847      MAP_ANONYMOUS, a dummy file descriptor for mmap.
1848 
1849    Beware of lots of tricks that minimize the total bookkeeping space
1850    requirements. The result is a little under 1K bytes (for 4-byte
1851    pointers and size_t).
1852 */
1853 
1854 /*
1855   SmallBins
1856 
1857     An array of bin headers for free chunks. Most bins hold sizes that are
1858     unusual as malloc request sizes, but are more usual for fragments
1859     and consolidated sets of chunks, which is what these bins hold, so
1860     they can be found quickly.  All procedures maintain the invariant
1861     that no consolidated chunk physically borders another one, so each
1862     chunk in a list is known to be preceded and followed by either
1863     inuse chunks or the ends of memory.
1864 
1865     To simplify use in double-linked lists, each bin header acts as a
1866     malloc_chunk pointing to the real first node, if it exists (else
1867     just pointing to itself).  This avoids special-casing for headers.
1868     But to conserve space and improve locality, we allocate only the
1869     fd/bk pointers of bins, and then use repositioning tricks to treat
1870     these as the fields of a malloc_chunk*.
1871 
1872 */
1873 
1874 typedef struct malloc_chunk* mbinptr;
1875 
1876 /* addressing  */
1877 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1878 
1879 /* analog of ++bin */
1880 #define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1881 
1882 /* inverse of bin_at */
1883 #define bin2idx(m, b) \
1884   ( ((int) (((char*)(b)) - (char*)(bin_at(m,0)))) / (2 * sizeof(mchunkptr)))
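
/*
  Sketch (not compiled) of what the bin_at() overlay above means: bins[]
  stores only fd/bk pairs, and bin_at backs up by 2*SIZE_SZ so that the
  fd/bk fields of the resulting "fake" malloc_chunk land exactly on
  bins[2*i] and bins[2*i+1]; its prev_size/size fields overlap the previous
  bin and are never touched. The example_* helper is hypothetical; mstate
  is defined further below.
*/
#if 0
static void example_bin_overlay(mstate m, int i) {
  mbinptr b = bin_at(m, i);
  assert((char*)&(b->fd) == (char*)&(m->bins[2*i]));     /* fd overlays bins[2i]   */
  assert((char*)&(b->bk) == (char*)&(m->bins[2*i + 1])); /* bk overlays bins[2i+1] */
  assert(bin2idx(m, b) == i);                            /* bin2idx inverts bin_at */
}
#endif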
1885 
1886 /*
1887   Indexing
1888 
1889     Bins for sizes < MIN_TREEBIN_SIZE bytes contain chunks of all the
1890     same size, spaced 8 bytes apart. Larger bins are approximately
1891     logarithmically spaced.
1892 */
1893 
1894 #define NBINS              32
1895 #define SMALLBIN_WIDTH      8
1896 #define MIN_TREEBIN_SIZE  256
1897 /* bit-shift corresponding to MIN_TREEBIN_SIZE */
1898 #define TREE_BIN_SHIFT      8
1899 
1900 #define in_smallbin_range(sz)  \
1901   ((CHUNK_SIZE_T)(sz) <= (CHUNK_SIZE_T)(MIN_TREEBIN_SIZE-1))
1902 
1903 
1904 #define MAX_SMALL_REQUEST (MIN_TREEBIN_SIZE-(SIZE_SZ+MALLOC_ALIGNMENT))
1905 
1906 #define smallbin_index(sz)       (bin_index_t)((sz) >> 3)
1907 
1908 #define size_for_smallindex(i)   ((CHUNK_SIZE_T)(i) << 3)
1909 
1910 #define MIN_SMALLBIN_INDEX       (smallbin_index(MINSIZE))
1911 
1912 #define MIN_SMALLBIN_BIT         (idx2bit(smallbin_index(MINSIZE)))
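
/*
  Worked example (not compiled), assuming 4-byte INTERNAL_SIZE_T and
  pointers (so MINSIZE == 16): smallbins are spaced 8 bytes apart, so the
  index is simply size/8 and the mapping is exactly invertible. The
  example_* helper is hypothetical.
*/
#if 0
static void example_smallbin_index(void) {
  assert(in_smallbin_range(248));        /* largest size kept in a smallbin  */
  assert(!in_smallbin_range(256));       /* 256 and up are treebin sizes     */

  assert(smallbin_index(16) == 2);       /* MINSIZE lands in bin 2           */
  assert(smallbin_index(40) == 5);
  assert(size_for_smallindex(5) == 40);  /* inverse of smallbin_index        */
}
#endif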
1913 
1914 
1915 /*
1916    There are 2 equally spaced treebins for each power of two from
1917    TREE_BIN_SHIFT to TREE_BIN_SHIFT+16. The last bin holds anything
1918    larger.
1919 */
1920 
1921 
1922 static bin_index_t treebin_index(CHUNK_SIZE_T sz) {
1923   unsigned int  m;
1924   unsigned int x = sz >> TREE_BIN_SHIFT;
1925   if (x == 0)
1926     return 0;
1927   if (x > 0xFFFF)
1928     return NBINS-1;
1929 
1930   /* On intel, use BSRL instruction to find highest bit */
1931 #if defined(__GNUC__) && defined(i386)
1932 
1933   __asm__("bsrl %1,%0\n\t"
1934           : "=r" (m)
1935           : "g"  (x));
1936     return (m << 1) + ((sz >> (m + (TREE_BIN_SHIFT-1)) & 1));
1937 
1938 #else
1939   {
1940     /*
1941       Based on branch-free nlz algorithm in chapter 5 of Henry
1942       S. Warren Jr's book "Hacker's Delight".
1943     */
1944 
1945     unsigned int n = ((x - 0x100) >> 16) & 8;
1946     x <<= n;
1947     m = ((x - 0x1000) >> 16) & 4;
1948     n += m;
1949     x <<= m;
1950     m = ((x - 0x4000) >> 16) & 2;
1951     n += m;
1952     x = (x << m) >> 14;
1953     m = 13 - n + (x & ~(x>>1));
1954     /* shift up n and use the next bit to make finer-granularity bins. */
1955     return (m << 1) + ((sz >> (m + (TREE_BIN_SHIFT-1)) & 1));
1956   }
1957 #endif
1958 }
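
/*
  Worked example (not compiled): two equally spaced treebins per power of
  two, as computed by treebin_index above. minsize_for_treeindex, defined a
  little further below, gives the smallest size each bin can hold. The
  example_* helper is hypothetical.
*/
#if 0
static void example_treebin_index(void) {
  assert(treebin_index(256)  == 0);   /* bin 0 holds [256,  384)  */
  assert(treebin_index(383)  == 0);
  assert(treebin_index(384)  == 1);   /* bin 1 holds [384,  512)  */
  assert(treebin_index(1536) == 5);   /* bin 5 holds [1536, 2048) */

  assert(minsize_for_treeindex(5) == 1536);
  assert(minsize_for_treeindex(6) == 2048);
}
#endif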
1959 
1960 static bin_index_t bit2idx(bitmap_t x) {
1961 #if defined(__GNUC__) && defined(i386)
1962   int r;
1963   __asm__("bsfl %1,%0\n\t"
1964 	 : "=r" (r) : "g" (x));
1965   return (bin_index_t)r;
1966 #else
1967   return (bin_index_t)(ffs(x)-1);
1968 #endif
1969 }
1970 
1971 
1972 
1973 #define bin_index(sz) \
1974  ((in_smallbin_range(sz)) ? smallbin_index(sz) : treebin_index(sz))
1975 
1976 /*
1977   The most significant bit distinguishing nodes in the tree
1978   associated with a given bin
1979 */
1980 
1981 #define CHUNK_SIZE_BITS (sizeof(CHUNK_SIZE_T) * 8)
1982 
1983 #define bitshift_for_index(idx) \
1984   (idx == NBINS-1)? \
1985     CHUNK_SIZE_BITS-1 : \
1986     (((idx) >> 1) + TREE_BIN_SHIFT-2)
1987 
1988 #define tbin_at(m,i) (&((m)->treebins[i]))
1989 
1990 #define minsize_for_treeindex(i)                             \
1991     (((CHUNK_SIZE_T)(1) << (((i) >> 1) + TREE_BIN_SHIFT)) |  \
1992     (((CHUNK_SIZE_T)((i) & 1)) <<                            \
1993      ( ( (i) >> 1) + TREE_BIN_SHIFT - 1)))
1994 
1995 
1996 
1997 #define is_tbin(M, P) ((tbinptr*)(P) >= &((M)->treebins[0]) && \
1998                        (tbinptr*)(P) <  &((M)->treebins[NBINS]))
1999 
2000 #define leftmost_child(t) \
2001   (((t)->child[0] != 0)? ((t)->child[0]) : ((t)->child[1]))
2002 
2003 
2004 /*
2005   Fastbins
2006 
2007     An array of lists holding recently freed small chunks.  Fastbins
2008     are not doubly linked.  It is faster to single-link them, and
2009     since chunks are never removed from the middles of these lists,
2010     double linking is not necessary. Also, unlike regular bins, they
2011     are not even processed in FIFO order (they use faster LIFO) since
2012     ordering doesn't much matter in the transient contexts in which
2013     fastbins are normally used.
2014 
2015     Chunks in fastbins keep their inuse bit set, so they cannot
2016     be consolidated with other free chunks. malloc_consolidate
2017     releases all chunks in fastbins and consolidates them with
2018     other free chunks.
2019 */
2020 
2021 typedef struct malloc_chunk* mfastbinptr;
2022 
2023 #define fastbin_at(m,i) (&((m)->fastbins[i]))
2024 
2025 /* offset 2 to use otherwise unindexable first 2 bins */
2026 #define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
2027 
2028 
2029 /* The maximum fastbin request size we support */
2030 #define MAX_FAST_REQUEST     64
2031 
2032 #define MAX_FAST_SIZE (request2size(MAX_FAST_REQUEST))
2033 
2034 #define NFASTBINS  (fastbin_index(MAX_FAST_SIZE)+1)
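
/*
  Sketch (not compiled) of the LIFO push/pop that free and malloc perform
  on a fastbin, as described above. With 4-byte INTERNAL_SIZE_T,
  MAX_FAST_SIZE is request2size(64) == 72 and NFASTBINS is 8. The
  example_* helper is hypothetical; mstate is defined further below.
*/
#if 0
static void example_fastbin_lifo(mstate av, mchunkptr p) {
  CHUNK_SIZE_T size = chunksize(p);       /* e.g. a 16-byte chunk -> index 0 */
  mfastbinptr* fb = &(av->fastbins[fastbin_index(size)]);
  mchunkptr q;

  /* free: push p onto the front of the singly linked list, through fd */
  p->fd = *fb;
  *fb = p;

  /* malloc: pop the most recently freed chunk of this size (LIFO) */
  q = *fb;
  if (q != 0)
    *fb = q->fd;
}
#endif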
2035 
2036 /*
2037   FASTCHUNKS_BIT held in max_fast indicates that there are probably
2038   some fastbin chunks. It is set true on entering a chunk into any
2039   fastbin, and cleared only in malloc_consolidate.
2040 */
2041 
2042 #define FASTCHUNKS_BIT        (2U)
2043 
2044 #define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
2045 #define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT))
2046 #define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2047 
2048 /*
2049    Set value of max_fast.
2050    Use impossibly small value if 0.
2051 */
2052 
2053 #define set_max_fast(M, s) \
2054   (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2055   ((M)->max_fast &  (FASTCHUNKS_BIT))
2056 
2057 #define get_max_fast(M) \
2058   ((M)->max_fast & ~(FASTCHUNKS_BIT))
2059 
2060 
2061 /*
2062   Top
2063 
2064     The top-most available chunk (i.e., the one bordering the end of
2065     available memory) is treated specially. It is never included in
2066     any bin, is used only if no other chunk is available, and is
2067     released back to the system if it is very large (see
2068     M_TRIM_THRESHOLD).
2069 
2070     Top initially points to a dummy bin with zero size, thus forcing
2071     extension on the first malloc request, so we avoid having any
2072     special code in malloc to check whether it even exists yet.
2073 */
2074 
2075 
2076 /*
2077   Binmap
2078 
2079     To help compensate for the large number of bins, a one-level index
2080     structure is used for bin-by-bin searching.  `binmap' is a
2081     bitvector recording whether bins are definitely empty so they can
2082     be skipped over during traversals.
2083 */
2084 
2085 /* Conservatively use 32 bits per map word, even on a 64-bit system */
2086 
2087 #define idx2bit(i)           ((bitmap_t)(1) << (i))
2088 
2089 #define mark_smallbin(m,i)   ((m)->smallbits |= idx2bit(i))
2090 #define mark_treebin(m,i)    ((m)->treebits  |= idx2bit(i))
2091 
2092 #define clear_smallbin(m,i)  ((m)->smallbits &= ~idx2bit(i))
2093 #define clear_treebin(m,i)   ((m)->treebits  &= ~idx2bit(i))
2094 
2095 
2096 /* isolate the least set bit of a bitmap */
2097 
2098 #define least_bit(x)         ((x) & -(x))
2099 
2100 /* create mask with all bits to left of least bit of x on */
2101 
2102 #define left_bits(x)         ((x<<1) | -(x<<1))
2103 
2104 /* create mask with all bits to left of or equal to least bit of x on */
2105 
2106 #define same_or_left_bits(x) ((x) | -(x))
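
/*
  Worked example (not compiled) of the binmap bit tricks above, mirroring
  the smallbin scan in malloc below: given a map with only bins 3 and 6
  marked, a search starting at empty bin 5 lands on bin 6. The example_*
  helper is hypothetical.
*/
#if 0
static void example_binmap_bits(void) {
  bitmap_t map  = idx2bit(3) | idx2bit(6);  /* only bins 3 and 6 non-empty */
  bitmap_t sbit = idx2bit(5);

  assert(least_bit(map) == idx2bit(3));     /* smallest marked bin          */
  assert((sbit & map) == 0);                /* bin 5 itself is empty        */
  assert(bit2idx(least_bit(left_bits(sbit) & map)) == 6); /* next usable bin */
}
#endif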
2107 
2108 
2109 /*
2110   sysctl is a status word holding dynamically discovered
2111   or controlled properties of the morecore function
2112 */
2113 
2114 #define MORECORE_CONTIGUOUS_BIT  (1U)
2115 #define TRIM_DISABLE_BIT         (2U)
2116 #define MMAP_DISABLE_BIT         (4U)
2117 
2118 #define contiguous(M) \
2119         (((M)->sysctl &  MORECORE_CONTIGUOUS_BIT))
2120 #define set_contiguous(M) \
2121         ((M)->sysctl |=  MORECORE_CONTIGUOUS_BIT)
2122 #define set_noncontiguous(M) \
2123         ((M)->sysctl &= ~MORECORE_CONTIGUOUS_BIT)
2124 
2125 #define disable_trim(M) \
2126         ((M)->sysctl |=  TRIM_DISABLE_BIT)
2127 #define enable_trim(M) \
2128         ((M)->sysctl &=  ~TRIM_DISABLE_BIT)
2129 #define trim_disabled(M) \
2130         ((M)->sysctl & TRIM_DISABLE_BIT)
2131 
2132 #define enable_mmap(M) \
2133         ((M)->sysctl &=  ~MMAP_DISABLE_BIT)
2134 #define disable_mmap(M) \
2135         ((M)->sysctl |=  MMAP_DISABLE_BIT)
2136 #define mmap_disabled(M) \
2137         ((M)->sysctl &   MMAP_DISABLE_BIT)
2138 
2139 
2140 
2141 
2142 /*
2143    ----------- Internal state representation and initialization -----------
2144 */
2145 
2146 struct malloc_state {
2147   /* The maximum chunk size to be eligible for fastbin */
2148   CHUNK_SIZE_T     max_fast;
2149 
2150   /* Bitmap of bins */
2151   bitmap_t         smallbits;
2152   bitmap_t         treebits;
2153 
2154   /* Base of the topmost chunk -- not otherwise kept in a bin */
2155   mchunkptr        top;
2156 
2157   /* Fastbins */
2158   mfastbinptr      fastbins[NFASTBINS];
2159 
2160   /* Smallbins packed as described above */
2161   mchunkptr        bins[NBINS * 2];
2162 
2163   /* Treebins */
2164   tbinptr          treebins[NBINS];
2165 
2166   /* Padding to allow addressing past end of treebin array */
2167   struct malloc_tree_chunk initial_top;
2168 
2169   /* Tunable parameters */
2170   CHUNK_SIZE_T     trim_threshold;
2171   INTERNAL_SIZE_T  top_pad;
2172   INTERNAL_SIZE_T  mmap_threshold;
2173 
2174   /* Memory map support */
2175   int              n_mmaps;
2176   int              n_mmaps_max;
2177   int              max_n_mmaps;
2178 
2179   /* Cache malloc_getpagesize */
2180   unsigned int     pagesize;
2181 
2182   /* Track properties of MORECORE */
2183   unsigned int     sysctl;
2184 
2185   /* Statistics */
2186   INTERNAL_SIZE_T  mmapped_mem;
2187   INTERNAL_SIZE_T  sbrked_mem;
2188   INTERNAL_SIZE_T  max_sbrked_mem;
2189   INTERNAL_SIZE_T  max_mmapped_mem;
2190   INTERNAL_SIZE_T  max_total_mem;
2191 };
2192 
2193 typedef struct malloc_state *mstate;
2194 
2195 /*
2196    There is exactly one instance of this struct in this malloc.
2197    If you are adapting this malloc in a way that does NOT use a static
2198    malloc_state, you MUST explicitly zero-fill it before using. This
2199    malloc relies on the property that malloc_state is initialized to
2200    all zeroes (as is true of C statics).
2201 */
2202 
2203 static struct malloc_state av_;  /* never directly referenced */
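
/*
  Sketch (not compiled), relevant only to adaptations that manage their own
  malloc_state rather than the file-scope static above: such a state must
  look exactly like a zero-filled C static before first use. The helper
  below and its caller-provided 'ms' argument are hypothetical.
*/
#if 0
#include <string.h>   /* for memset */

static void example_init_adapted_state(struct malloc_state* ms) {
  /* e.g. 'ms' carved out of some larger region by the adapting code */
  memset(ms, 0, sizeof(*ms));
}
#endif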
2204 
2205 /*
2206    All uses of av_ are via get_malloc_state().
2207    At most one "call" to get_malloc_state is made per invocation of
2208    the public versions of malloc and free, but other routines
2209    that in turn invoke malloc and/or free may call it more than once.
2210    Also, it is called in check* routines if DEBUG is set.
2211 */
2212 
2213 #define get_malloc_state() (&(av_))
2214 
2215 /*
2216   Initialize a malloc_state struct. This is called only
2217   in sysmalloc, to avoid it being inlined everywhere else,
2218   which causes useless code bloat.
2219 */
2220 
2221 static void malloc_init_state(mstate av) {
2222   int     i;
2223   mbinptr bin;
2224 
2225   /* Establish circular links for bins */
2226   for (i = 0; i < NBINS; ++i) {
2227     bin = bin_at(av,i);
2228     bin->fd = bin->bk = bin;
2229   }
2230 
2231   av->top_pad        = DEFAULT_TOP_PAD;
2232   av->n_mmaps_max    = DEFAULT_MMAP_MAX;
2233   av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2234   av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
2235 
2236 #if MORECORE_CONTIGUOUS
2237   set_contiguous(av);
2238 #else
2239   set_noncontiguous(av);
2240 #endif
2241 
2242   set_max_fast(av, DEFAULT_MXFAST);
2243 
2244   av->top = (mchunkptr)(&(av->initial_top));
2245   av->pagesize  = malloc_getpagesize;
2246 }
2247 
2248 #define ensure_initialization(M) \
2249   if ((M)->top == 0) sysmalloc(M, 0);
2250 
2251 
2252 /*
2253    Other internal utilities
2254 */
2255 
2256 static Void_t*  sysmalloc(mstate, CHUNK_SIZE_T);
2257 static int  systrim(mstate, size_t);
2258 static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
2259 static void insert_treenode(mstate, tchunkptr, CHUNK_SIZE_T);
2260 #if 0
2261 static void unlink_treenode(mstate, tchunkptr);
2262 static void unlink_small_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T size);
2263 #endif
2264 static void transfer_tree_links(tchunkptr oldt, tchunkptr newt);
2265 static tchunkptr find_replacement(tchunkptr t);
2266 static void unlink_chained_node(tchunkptr t);
2267 static void insert_small_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T nb);
2268 static void insert_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T nb);
2269 static mchunkptr take_from_smallbin(mstate av, mchunkptr bin, bitmap_t bit);
2270 static void unlink_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T size);
2271 static void malloc_consolidate(mstate);
2272 
2273 
2274 #pragma no_inline(systrim)
2275 
2276 
2277 #if HAVE_MMAP
2278 static mchunkptr mmap_malloc(mstate, INTERNAL_SIZE_T);
2279 #endif
2280 
2281 /*
2282   Debugging support
2283 
2284   These routines make a number of assertions about the states
2285   of data structures that should be true at all times. If any
2286   are not true, it's very likely that a user program has somehow
2287   trashed memory. (It's also possible that there is a coding error
2288   in malloc, in which case please report it!)
2289 */
2290 
2291 
2292 #if ! DEBUG
2293 
2294 #define check_chunk(P)
2295 #define check_free_chunk(P)
2296 #define check_inuse_chunk(P)
2297 #define check_remalloced_chunk(P,N)
2298 #define check_malloced_chunk(P,N)
2299 #define check_malloc_state(M)
2300 #define check_tree(P)
2301 
2302 #else
2303 #define check_chunk(P)              do_check_chunk(P)
2304 #define check_free_chunk(P)         do_check_free_chunk(P)
2305 #define check_inuse_chunk(P)        do_check_inuse_chunk(P)
2306 #define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
2307 #define check_malloced_chunk(P,N)   do_check_malloced_chunk(P,N)
2308 #define check_tree(P)          do_check_tree(P)
2309 #define check_malloc_state(M)       do_check_malloc_state(M)
2310 
2311 static void do_check_malloc_state(mstate);
2312 
2313 /*
2314   Find x in a treebin. Used in other check functions.
2315 */
2316 
2317 static tchunkptr tree_find(tchunkptr x) {
2318   mstate av = get_malloc_state();
2319   CHUNK_SIZE_T nb = chunksize(x);
2320   bin_index_t idx = treebin_index(nb);
2321   tbinptr* bin = tbin_at(av, idx);
2322   tchunkptr t = *bin;
2323   bin_index_t shift = bitshift_for_index(idx);
2324   CHUNK_SIZE_T allbits = 0;
2325 
2326   if (t == 0)
2327     return 0;
2328 
2329   while (t != 0) {
2330     CHUNK_SIZE_T size;
2331     if (t == x)
2332       return t;
2333     size = chunksize(t);
2334     assert((size & allbits) == allbits);
2335     if (size == nb) {
2336       tchunkptr p = t->bk;
2337       for (;;) {
2338         if (p == x)
2339           return p;
2340         else if (p == t)
2341           return 0;
2342         else
2343           p = p->bk;
2344       }
2345     }
2346     if (((nb >> shift) & 1) == 0) {
2347       t = t->child[0];
2348     }
2349     else {
2350       t = t->child[1];
2351       allbits |= 1U << shift;
2352     }
2353 
2354     --shift;
2355   }
2356   return 0;
2357 }
2358 
2359 /*
2360   Properties of all chunks
2361 */
2362 
2363 static void do_check_chunk(mchunkptr p) {
2364   mstate av = get_malloc_state();
2365   CHUNK_SIZE_T  sz = chunksize(p);
2366   /* min and max possible addresses assuming contiguous allocation */
2367   char* max_address = (char*)(av->top) + chunksize(av->top);
2368   char* min_address = max_address - av->sbrked_mem;
2369 
2370   if (!chunk_is_mmapped(p)) {
2371 
2372     /* Has legal address ... */
2373     if (p != av->top) {
2374       if (contiguous(av)) {
2375         assert(((char*)p) >= min_address);
2376         assert(((char*)p + sz) <= ((char*)(av->top)));
2377       }
2378     }
2379     else {
2380       /* top size is always at least MINSIZE */
2381       assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
2382       /* top predecessor always marked inuse */
2383       assert(prev_inuse(p));
2384     }
2385 
2386   }
2387   else {
2388 #if HAVE_MMAP
2389     /* address is outside main heap  */
2390     if (contiguous(av) && av->top != (mchunkptr)(&(av->initial_top))) {
2391       assert(((char*)p) < min_address || ((char*)p) > max_address);
2392     }
2393     /* chunk is page-aligned */
2394     assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
2395     /* mem is aligned */
2396     assert(aligned_OK(chunk2mem(p)));
2397 #else
2398     /* force an appropriate assert violation if debug set */
2399     assert(!chunk_is_mmapped(p));
2400 #endif
2401   }
2402 }
2403 
2404 static void check_all_less(tchunkptr t, CHUNK_SIZE_T nb) {
2405   if (t == 0)
2406     return;
2407   assert(chunksize(t) < nb);
2408   check_all_less(t->child[0], nb);
2409   check_all_less(t->child[1], nb);
2410 }
2411 
2412 static void check_all_greater(tchunkptr t, CHUNK_SIZE_T nb) {
2413   if (t == 0)
2414     return;
2415   assert(chunksize(t) >= nb);
2416   check_all_greater(t->child[0], nb);
2417   check_all_greater(t->child[1], nb);
2418 }
2419 
2420 
2421 static INTERNAL_SIZE_T check_tree_fields(tchunkptr t) {
2422   INTERNAL_SIZE_T size = chunksize(t);
2423   assert(size >= MIN_TREEBIN_SIZE);
2424   do_check_chunk(((mchunkptr)t));
2425   assert(!inuse(t));
2426   assert(t->fd->bk == t);
2427   assert(t->bk->fd == t);
2428   assert(t->parent != t);
2429   assert(t->child[0] != t);
2430   assert(t->child[1] != t);
2431   if (t->child[0] != 0 && t->child[1] != 0) {
2432     check_all_less(t->child[0], chunksize(t->child[1]));
2433     check_all_greater(t->child[1], chunksize(t->child[0]));
2434   }
2435   return size;
2436 }
2437 
2438 static INTERNAL_SIZE_T do_check_tree(tchunkptr t) {
2439   tchunkptr p;
2440   tchunkptr h;
2441   INTERNAL_SIZE_T total = check_tree_fields(t);
2442 
2443   /* If t is on a same-sized list, another node on the list must have a parent */
2444   if (t->parent == 0) {
2445     h = t->bk;
2446     while (h != t && h->parent == 0)
2447       h = h->bk;
2448     assert(h != t);
2449   }
2450   else
2451     h = t;
2452 
2453   assert (h->parent->child[0] == h ||
2454           h->parent->child[1] == h ||
2455           *((tbinptr*)(h->parent)) == h);
2456 
2457 
2458   /* only one node on a same-sized list has parent or children */
2459   p = h->bk;
2460   while (p != h) {
2461     assert(p->child[0] == 0);
2462     assert(p->child[1] == 0);
2463     assert(p->parent == 0);
2464     assert(chunksize(p) == chunksize(h));
2465     total += check_tree_fields(p);
2466     p = p->bk;
2467   }
2468 
2469   if (h->child[0] != 0) {
2470     assert(h->child[0]->parent == h);
2471     total += do_check_tree(h->child[0]);
2472   }
2473 
2474   if (h->child[1] != 0) {
2475     assert(h->child[1]->parent == h);
2476     total += do_check_tree(h->child[1]);
2477   }
2478 
2479   return total;
2480 }
2481 
2482 static void do_check_links(mchunkptr p) {
2483   if (in_smallbin_range(chunksize(p))) {
2484     assert(p->fd->bk == p);
2485     assert(p->bk->fd == p);
2486   }
2487   else {
2488     do_check_tree((tchunkptr)p);
2489   }
2490 }
2491 
2492 /*
2493   Properties of free chunks
2494 */
2495 
2496 static void do_check_free_chunk(mchunkptr p) {
2497   mstate av = get_malloc_state();
2498 
2499   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2500   mchunkptr next = chunk_at_offset(p, sz);
2501 
2502   do_check_chunk(p);
2503 
2504   /* Chunk must claim to be free ... */
2505   assert(!inuse(p));
2506   assert (!chunk_is_mmapped(p));
2507 
2508   /* Unless a special marker, must have OK fields */
2509   if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
2510   {
2511     assert((sz & MALLOC_ALIGN_MASK) == 0);
2512     assert(aligned_OK(chunk2mem(p)));
2513     /* ... matching footer field */
2514     assert(next->prev_size == sz);
2515     /* ... and is fully consolidated */
2516     assert(prev_inuse(p));
2517     assert (next == av->top || inuse(next));
2518 
2519     do_check_links(p);
2520   }
2521   else /* markers are always of size SIZE_SZ */
2522     assert(sz == SIZE_SZ);
2523 }
2524 
2525 /*
2526   Properties of inuse chunks
2527 */
2528 
2529 static void do_check_inuse_chunk(mchunkptr p) {
2530   mstate av = get_malloc_state();
2531   mchunkptr next;
2532   do_check_chunk(p);
2533 
2534   if (chunk_is_mmapped(p))
2535     return; /* mmapped chunks have no next/prev */
2536 
2537   /* Check whether it claims to be in use ... */
2538   assert(inuse(p));
2539 
2540   next = next_chunk(p);
2541 
2542   /* ... and is surrounded by OK chunks.
2543     Since more things can be checked with free chunks than inuse ones,
2544     if an inuse chunk borders them and debug is on, it's worth doing them.
2545   */
2546   if (!prev_inuse(p))  {
2547     /* Note that we cannot even look at prev unless it is not inuse */
2548     mchunkptr prv = prev_chunk(p);
2549     assert(next_chunk(prv) == p);
2550     do_check_free_chunk(prv);
2551   }
2552 
2553   if (next == av->top) {
2554     assert(prev_inuse(next));
2555     assert(chunksize(next) >= MINSIZE);
2556   }
2557   else if (!inuse(next))
2558     do_check_free_chunk(next);
2559 }
2560 
2561 
2562 static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) {
2563   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2564 
2565   do_check_inuse_chunk(p);
2566 
2567   /* Legal size ... */
2568   assert((sz & MALLOC_ALIGN_MASK) == 0);
2569   assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
2570   /* ... and alignment */
2571   assert(aligned_OK(chunk2mem(p)));
2572   /* chunk is less than MINSIZE more than request */
2573   assert((long)(sz) - (long)(s) >= 0);
2574   assert((long)(sz) - (long)(s + MINSIZE) < 0);
2575 }
2576 
2577 /*
2578   Properties of nonrecycled chunks at the point they are malloced
2579 */
2580 
2581 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) {
2582   /* same as recycled case ... */
2583   do_check_remalloced_chunk(p, s);
2584 #if 0
2585   do_check_malloc_state(get_malloc_state());
2586 #endif
2587 
2588   /*
2589     ... plus,  must obey implementation invariant that prev_inuse is
2590     always true of any allocated chunk; i.e., that each allocated
2591     chunk borders either a previously allocated and still in-use
2592     chunk, or the base of its memory arena. This is ensured
2593     by making all allocations from the `lowest' part of any found
2594     chunk.
2595   */
2596 
2597   assert(prev_inuse(p));
2598 }
2599 
2600 
2601 static CHUNK_SIZE_T do_check_smallbin(mstate av, int i, mbinptr b) {
2602   mchunkptr p = b->bk;
2603   mchunkptr q;
2604   bin_index_t idx;
2605   INTERNAL_SIZE_T size;
2606   CHUNK_SIZE_T  total = 0;
2607   unsigned int empty = (av->smallbits & (1 << i)) == 0;
2608 
2609   if (i >= 2) {
2610     if (empty)
2611       assert(p == b);
2612     if (p == b)
2613       assert(empty);
2614 
2615     if (p != b)
2616       assert(!empty);
2617   }
2618 
2619   for (; p != b; p = p->bk) {
2620     /* each chunk claims to be free */
2621     do_check_free_chunk(p);
2622     size = chunksize(p);
2623     total += size;
2624     if (i >= 2) {
2625       /* chunk belongs in bin */
2626       idx = smallbin_index(size);
2627       assert(idx == i);
2628       assert(p->bk == b ||
2629              (CHUNK_SIZE_T)chunksize(p->bk) ==
2630              (CHUNK_SIZE_T)chunksize(p));
2631     }
2632     /* chunk is followed by a legal chain of inuse chunks */
2633     for (q = next_chunk(p);
2634          (q != av->top && inuse(q) &&
2635           (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
2636          q = next_chunk(q))
2637       do_check_inuse_chunk(q);
2638   }
2639 
2640   return total;
2641 }
2642 
2643 /*
2644   Properties of malloc_state.
2645 
2646   This may be useful for debugging malloc, as well as detecting user
2647   programmer errors that somehow write into malloc_state.
2648 
2649   If you are extending or experimenting with this malloc, you can
2650   probably figure out how to hack this routine to print out or
2651   display chunk addresses, sizes, bins, and other instrumentation.
2652 */
2653 
2654 static void do_check_malloc_state(mstate av) {
2655   int i;
2656   mbinptr b;
2657   CHUNK_SIZE_T  total = 0;
2658   tchunkptr t;
2659   unsigned int empty;
2660   tbinptr* tb;
2661   int max_fast_bin;
2662   mchunkptr p;
2663 
2664   /* internal size_t must be no wider than pointer type */
2665   assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2666 
2667   /* alignment is a power of 2 */
2668   assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2669 
2670   /* cannot run remaining checks until fully initialized */
2671   if (av->top == 0 || av->top == (mchunkptr)(&(av->initial_top)))
2672     return;
2673 
2674   /* pagesize is a power of 2 */
2675   assert((av->pagesize & (av->pagesize-1)) == 0);
2676 
2677   /* check smallbins */
2678   for (i = 1; i < NBINS; ++i) {
2679     b = bin_at(av, i);
2680     total += do_check_smallbin(av, i, b);
2681   }
2682 
2683   /* check treebins */
2684   for (i = 0; i < NBINS; ++i) {
2685     tb = tbin_at(av, i);
2686     t = *tb;
2687     empty = (av->treebits & (1 << i)) == 0;
2688     if (t == 0)
2689       assert(empty);
2690     else if (t != 0) {
2691       assert(!empty);
2692       total += do_check_tree(t);
2693     }
2694   }
2695 
2696 
2697   /* top chunk is OK */
2698   check_chunk(av->top);
2699   /* top not in tree */
2700   if (!in_smallbin_range(chunksize(av->top)))
2701     assert(tree_find((tchunkptr)(av->top)) == 0);
2702 
2703   /* max_fast is in allowed range */
2704   assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
2705 
2706   max_fast_bin = fastbin_index(av->max_fast);
2707 
2708   for (i = 0; i < NFASTBINS; ++i) {
2709     p = av->fastbins[i];
2710 
2711     /* all bins past max_fast are empty */
2712     if (i > max_fast_bin)
2713       assert(p == 0);
2714 
2715     while (p != 0) {
2716       /* each chunk claims to be inuse */
2717       do_check_inuse_chunk(p);
2718       total += chunksize(p);
2719       /* chunk belongs in this bin */
2720       assert(fastbin_index(chunksize(p)) == i);
2721       p = p->fd;
2722     }
2723   }
2724 
2725   /* sanity checks for statistics */
2726 
2727   assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
2728 
2729   assert(av->n_mmaps >= 0);
2730   assert(av->n_mmaps <= av->n_mmaps_max);
2731   assert(av->n_mmaps <= av->max_n_mmaps);
2732 
2733   assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
2734          (CHUNK_SIZE_T)(av->max_sbrked_mem));
2735 
2736   assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
2737          (CHUNK_SIZE_T)(av->max_mmapped_mem));
2738 
2739   assert((CHUNK_SIZE_T)(av->max_total_mem) >=
2740          (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
2741 }
2742 #endif
2743 
2744 
2745 
2746 /*
2747    ----------- Operations on trees -----------
2748 */
2749 
2750 /*
2751   Insert chunk into corresponding list or tree
2752 */
2753 
2754 static void insert_treenode(mstate av, tchunkptr x,
2755                             CHUNK_SIZE_T nb) {
2756   bin_index_t idx = treebin_index(nb);
2757   tbinptr* bin = tbin_at(av, idx);
2758   tchunkptr t = *bin;
2759 
2760   x->child[0] = 0;
2761   x->child[1] = 0;
2762   x->index = idx;
2763 
2764   if (t == 0) {
2765     av->treebits |= idx2bit(idx);
2766     *bin = x;
2767     x->parent = (tchunkptr)bin;
2768     x->fd = x;
2769     x->bk = x;
2770   }
2771   else {
2772     bin_index_t shift = bitshift_for_index(idx);
2773     tchunkptr b;
2774     check_tree(t);
2775 
2776     while (chunksize(t) != nb) {
2777       tchunkptr* pchild = &(t->child[(nb >> shift--) & 1]);
2778       if (*pchild != 0) {
2779         t = *pchild;
2780       }
2781       else {
2782         *pchild = x;
2783         x->parent = t;
2784         x->fd = x;
2785         x->bk = x;
2786         return;
2787       }
2788     }
2789     /* Link in same-sized node */
2790     b = t->bk;
2791     x->parent = 0;
2792     x->fd = t;
2793     x->bk = b;
2794     b->fd = t->bk = x;
2795   }
2796 }
2797 
2798 static void transfer_tree_links(tchunkptr oldt, tchunkptr newt) {
2799   tchunkptr p = oldt->parent;
2800   newt->parent = p;
2801 
2802   if (p->child[0] == oldt)
2803     p->child[0] = newt;
2804   else if (p->child[1] == oldt)
2805     p->child[1] = newt;
2806   else
2807     *((tbinptr*)p) = newt;
2808 
2809   if ( (newt->child[0] = oldt->child[0]) != 0)
2810     newt->child[0]->parent = newt;
2811 
2812   if ( (newt->child[1] = oldt->child[1]) != 0)
2813     newt->child[1]->parent = newt;
2814 }
2815 
2816 static tchunkptr find_replacement(tchunkptr t) {
2817   /*
2818      Unless t is itself a leaf node, we must replace t with a leaf
2819      node (not merely one with an open left or right, as with binary
2820      search trees), to make sure that lefts and rights of descendants
2821      correspond properly to bit masks.  We use the leftmost leaf of
2822      right child, or, if there is no right child, the rightmost leaf
2823      of left child.  We could use any other leaf, but these choices
2824      will tend to maintain nicer trees.
2825   */
2826   tchunkptr* cp;
2827   tchunkptr c;
2828   if ((c = *(cp = &(t->child[1]))) == 0)
2829     if ((c = *(cp = &(t->child[0]->child[1]))) == 0)
2830       c = *(cp = &(t->child[0]));
2831 
2832   assert(c != 0);
2833   for (;;) {
2834     if (c->child[0] != 0)
2835       c = *(cp = &(c->child[0]));
2836     else if (c->child[1] != 0)
2837       c = *(cp = &(c->child[1]));
2838     else {
2839       *cp = 0; /* unlink from parent */
2840       return c;
2841     }
2842   }
2843 }
2844 
2845 static void unlink_leaf_node(mstate av, tchunkptr t) {
2846   tchunkptr p = t->parent;
2847   if (p->child[0] == t)
2848     p->child[0] = 0;
2849   else if (p->child[1] == t)
2850     p->child[1] = 0;
2851   else {
2852     assert(is_tbin(av, p));
2853     *((tbinptr*)p) = 0;
2854     av->treebits &= ~idx2bit(t->index);
2855   }
2856 }
2857 
2858 static void unlink_chained_node(tchunkptr t) {
2859   tchunkptr bck = t->bk;
2860   tchunkptr fwd = t->fd;
2861   fwd->bk = bck;
2862   bck->fd = fwd;
2863   /* t's parent is nonnull if t was head of chain */
2864   if (t->parent != 0)
2865     transfer_tree_links(t, fwd);
2866   check_tree(fwd);
2867 }
2868 
2869 
2870 
2871 /*
2872    ----------- other helper functions -----------
2873   We expect these to be inlined
2874 */
2875 
2876 
2877 static void insert_small_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T nb) {
2878   bin_index_t idx = smallbin_index(nb);
2879   mchunkptr bck = bin_at(av, idx);
2880   mchunkptr fwd = bck->fd;
2881   mark_smallbin(av, idx);
2882   p->fd = fwd;
2883   p->bk = bck;
2884   fwd->bk = bck->fd = p;
2885 
2886   assert(in_smallbin_range(nb));
2887 }
2888 
2889 static void insert_chunk(mstate av, mchunkptr p, CHUNK_SIZE_T nb) {
2890   if (in_smallbin_range(nb))
2891     insert_small_chunk(av, p, nb);
2892   else
2893     insert_treenode(av, (tchunkptr)p, nb);
2894 }
2895 
2896 
2897 static mchunkptr take_from_smallbin(mstate av, mchunkptr bin, bitmap_t bit) {
2898   mchunkptr p = bin->bk;
2899   mchunkptr bck = p->bk;
2900   assert(p != bin);
2901   bin->bk = bck;
2902   bck->fd = bin;
2903   if (bck == bin)
2904     av->smallbits &= ~bit;
2905   return p;
2906 }
2907 
2908 static void unlink_chunk(mstate av, mchunkptr q, CHUNK_SIZE_T size) {
2909   mchunkptr fwd = q->fd;
2910   mchunkptr bck = q->bk;
2911   fwd->bk = bck;
2912   bck->fd = fwd;
2913   if (fwd == bck && in_smallbin_range(size)) {
2914     clear_smallbin(av, smallbin_index(size));
2915   }
2916   else if (!in_smallbin_range(size)) {
2917     tchunkptr t = (tchunkptr)q;
2918     tchunkptr c = (tchunkptr)fwd;
2919     if (c == t) {
2920       if (t->child[0] == t->child[1]) {
2921         unlink_leaf_node(av, t);
2922         return;
2923       }
2924       else {
2925         c = find_replacement(t);
2926       }
2927     }
2928     else {
2929       if (t->parent == 0) {
2930         return;
2931       }
2932     }
2933 
2934     transfer_tree_links(t, c);
2935     check_tree(c);
2936   }
2937 }
2938 
2939 static Void_t* use_treechunk(mstate av,
2940                              CHUNK_SIZE_T nb,
2941                              tchunkptr bestchunk,
2942                              CHUNK_SIZE_T bestsize,
2943                              tchunkptr leaf) {
2944 
2945   CHUNK_SIZE_T rsize;
2946 
2947   if (bestchunk->bk != bestchunk)
2948     unlink_chained_node(bestchunk);
2949   else {
2950     unlink_leaf_node(av, leaf);
2951     if (leaf != bestchunk)
2952       transfer_tree_links(bestchunk, leaf);
2953   }
2954 
2955   rsize = bestsize - nb;
2956   if (rsize >= MINSIZE) {
2957     mchunkptr rem = chunk_at_offset(bestchunk, nb);
2958     set_head(bestchunk, nb | PREV_INUSE);
2959     set_head(rem, rsize | PREV_INUSE);
2960     set_foot(rem, rsize);
2961     insert_chunk(av, rem, rsize);
2962   }
2963   else {
2964     set_inuse_bit_at_offset(bestchunk, bestsize);
2965   }
2966   check_malloced_chunk((mchunkptr)(bestchunk), nb);
2967   return chunk2mem(bestchunk);
2968 }
2969 
2970 
2971 /*
2972   ------------------------------ malloc ------------------------------
2973 */
2974 
2975 Void_t* mALLOc(size_t bytes) {
2976   mstate av = get_malloc_state();
2977   CHUNK_SIZE_T nb;
2978 
2979   checked_request2size(bytes, nb);
2980 
2981   if (nb <= (CHUNK_SIZE_T)(av->max_fast)) {
2982     mfastbinptr*  fb = &(av->fastbins[(fastbin_index(nb))]);
2983     mchunkptr fp = *fb;
2984     if (fp != 0) {
2985       *fb = fp->fd;
2986       check_remalloced_chunk(fp, nb);
2987       return chunk2mem(fp);
2988     }
2989   }
2990 
2991 
2992   for (;;) {
2993     if (in_smallbin_range(nb)) {
2994       bin_index_t sidx = smallbin_index(nb);
2995       bitmap_t sbit = idx2bit(sidx);
2996 
2997       if (sbit <= av->smallbits) {
2998         mchunkptr p;
2999         if ((sbit & av->smallbits) != 0) {
3000           p = take_from_smallbin(av, bin_at(av,sidx), sbit);
3001           set_inuse_bit_at_offset(p, nb);
3002         }
3003         else {
3004           bitmap_t nbit = least_bit(left_bits(sbit) & av->smallbits);
3005           bin_index_t nidx = bit2idx(nbit);
3006           CHUNK_SIZE_T psize = size_for_smallindex(nidx);
3007           CHUNK_SIZE_T qsize = psize - nb;
3008           p = take_from_smallbin(av, bin_at(av, nidx), nbit);
3009           if (qsize < MINSIZE) {
3010             set_inuse_bit_at_offset(p, psize);
3011           }
3012           else {
3013             mchunkptr q = chunk_at_offset(p, nb);
3014             set_head(p, nb | PREV_INUSE);
3015             set_head(q, qsize | PREV_INUSE);
3016             set_foot(q, qsize);
3017             insert_small_chunk(av, q, qsize);
3018           }
3019         }
3020         check_malloced_chunk(p, nb);
3021         return chunk2mem(p);
3022       }
3023 
3024       if (av->treebits != 0) {
3025         bitmap_t vbit = least_bit(av->treebits);
3026         bin_index_t vidx = bit2idx(vbit);
3027         tbinptr* vbin = tbin_at(av, vidx);
3028         tchunkptr bestchunk = *vbin;
3029         tchunkptr c = leftmost_child(bestchunk);
3030         CHUNK_SIZE_T bestsize = chunksize(bestchunk);
3031         tchunkptr leaf;
3032         CHUNK_SIZE_T rsize;
3033 
3034         /*  Fast path if remainder will replace bestchunk */
3035         if (c == 0) {
3036           rsize = bestsize - nb;
3037           leaf = bestchunk;
3038 
3039           if (rsize >= minsize_for_treeindex(vidx) &&
3040               bestchunk->bk == bestchunk) {
3041             tchunkptr r = (tchunkptr)(chunk_at_offset(bestchunk, nb));
3042 
3043             set_head(bestchunk, nb | PREV_INUSE);
3044             set_head(r, rsize | PREV_INUSE);
3045             set_foot(r, rsize);
3046             *vbin = r;
3047             r->fd = r;
3048             r->bk = r;
3049             r->child[0] = 0;
3050             r->child[1] = 0;
3051             r->parent = (tchunkptr)vbin;
3052             r->index = vidx;
3053             check_malloced_chunk((mchunkptr)bestchunk, nb);
3054             return chunk2mem(bestchunk);
3055           }
3056         }
3057         else {
3058           do {
3059             CHUNK_SIZE_T csize = chunksize(c);
3060             if (csize < bestsize) {
3061               bestchunk = c;
3062               bestsize = csize;
3063             }
3064             leaf = c;
3065             c = leftmost_child(c);
3066           } while (c != 0);
3067         }
3068         return use_treechunk(av, nb, bestchunk, bestsize, leaf);
3069       }
3070     }
3071     else {
3072       bin_index_t tidx = treebin_index(nb);
3073       bitmap_t tbit = idx2bit(tidx);
3074 
3075       if (tbit <= av->treebits) {
3076         tchunkptr bestchunk = 0;
3077         CHUNK_SIZE_T bestsize = MAX_CHUNK_SIZE;
3078         tchunkptr leaf;
3079         bitmap_t vbit;
3080 
3081         for (;;) {
3082           if ((tbit & av->treebits) != 0) {
3083             tchunkptr t = *tbin_at(av, tidx);
3084             bin_index_t shift = bitshift_for_index(tidx);
3085             for (;;) {
3086               int dir;
3087               CHUNK_SIZE_T tsize = chunksize(t);
3088               leaf = t;
3089               if (tsize >= nb && tsize < bestsize) {
3090                 bestchunk = t;
3091                 bestsize = tsize;
3092                 if (tsize == nb && t->bk != t)
3093                   break;
3094               }
3095 
3096               dir = (shift == 0)? 0 : (nb >> shift--) & 1;
3097               t = leaf->child[dir];
3098               if (t == 0) {
3099                 shift = 0; /* if forced right, go leftmost from now on */
3100                 t = leaf->child[1-dir];
3101                 if (t == 0)
3102                   break;
3103               }
3104             }
3105             if (bestchunk != 0)
3106               return use_treechunk(av, nb, bestchunk, bestsize, leaf);
3107           }
3108           if (have_fastchunks(av))
3109             malloc_consolidate(av);
3110           else
3111             break;
3112         }
3113 
3114         vbit = least_bit(left_bits(tbit) & av->treebits);
3115         if (vbit != 0) {
3116           bin_index_t vidx = bit2idx(vbit);
3117           tbinptr* vbin = tbin_at(av, vidx);
3118           tchunkptr c = *vbin;
3119           do {
3120             CHUNK_SIZE_T csize = chunksize(c);
3121             leaf = c;
3122             if (csize < bestsize) {
3123               bestchunk = c;
3124               bestsize = csize;
3125             }
3126             c = leftmost_child(c);
3127           } while (c != 0);
3128           return use_treechunk(av, nb, bestchunk, bestsize, leaf);
3129         }
3130       }
3131     }
3132 
3133     /*
3134       If large enough, split off the chunk bordering the end of memory
3135       (held in av->top). This is called in accord with the best-fit
3136       search rule.  In effect, av->top is treated as larger (and thus
3137       less well fitting) than any other available chunk since it can
3138       be extended to be as large as necessary (up to system
3139       limitations).
3140 
3141       We require that av->top always exists (i.e., has size >=
3142       MINSIZE) after initialization, so if it would otherwise be
3143       exhausted by the current request, it is replenished. (The main
3144       reason for ensuring it exists is that we may need MINSIZE space
3145       to put in fenceposts in sysmalloc.)
3146     */
3147 
3148     if (av->top != 0) {
3149       mchunkptr topchunk = av->top;
3150       CHUNK_SIZE_T topsize = chunksize(topchunk);
3151 
3152       if (topsize >= nb + MINSIZE) {
3153         CHUNK_SIZE_T remainder_size = topsize - nb;
3154         mchunkptr remainder = chunk_at_offset(topchunk, nb);
3155 
3156         av->top = remainder;
3157         set_head(topchunk, nb | PREV_INUSE);
3158         set_head(remainder, remainder_size | PREV_INUSE);
3159 
3160         check_malloced_chunk(topchunk, nb);
3161         return chunk2mem(topchunk);
3162       }
3163       else if (have_fastchunks(av)) {
3164         malloc_consolidate(av);
3165       }
3166       else
3167         break;
3168     }
3169     else
3170       break;
3171   }
3172   return sysmalloc(av, nb);
3173 }
3174 
3175 /*
3176   ------------------------------ free ------------------------------
3177 */
3178 
3179 
3180 void fREe(Void_t* mem) {
3181   mstate av = get_malloc_state();
3182 
3183   mchunkptr p = mem2chunk(mem);
3184 
3185   if (mem != 0) {
3186     INTERNAL_SIZE_T rawsize = p->size;
3187     CHUNK_SIZE_T size = chunksize(p);
3188     check_inuse_chunk(p);
3189 
3190     /*
3191       If eligible, place chunk on a fastbin so it can be found
3192       and used quickly in malloc.
3193     */
3194 
3195     if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
3196 
3197 #if TRIM_FASTBINS
3198         /*
3199            If TRIM_FASTBINS set, don't place chunks
3200            bordering top into fastbins
3201         */
3202         && (chunk_at_offset(p, size) != av->top)
3203 #endif
3204         ) {
3205 
3206       mfastbinptr* fb;
3207       set_fastchunks(av);
3208       fb = &(av->fastbins[fastbin_index(size)]);
3209       p->fd = *fb;
3210       *fb = p;
3211     }
3212 
3213     else if ((rawsize & IS_MMAPPED) == 0) {
3214       mchunkptr nextchunk = chunk_at_offset(p, size);
3215       CHUNK_SIZE_T nextsize;
3216 
3217       if ((rawsize & PREV_INUSE) == 0) {
3218         CHUNK_SIZE_T prevsize = p->prev_size;
3219         size += prevsize;
3220         p = chunk_at_offset(p, -((long) prevsize));
3221         unlink_chunk(av, p, prevsize);
3222       }
3223 
3224       nextsize = chunksize(nextchunk);
3225       if (nextchunk == av->top) {
3226         size += nextsize;
3227         set_head(p, size | PREV_INUSE);
3228         av->top = p;
3229         if (size >= av->trim_threshold) {
3230           systrim(av, av->top_pad);
3231         }
3232       }
3233       else {
3234         if (!inuse_bit_at_offset(nextchunk, nextsize)) {
3235           size += nextsize;
3236           unlink_chunk(av, nextchunk, nextsize);
3237         }
3238         else
3239           set_head(nextchunk, nextsize);
3240 
3241         set_head(p, size | PREV_INUSE);
3242         set_foot(p, size);
3243         insert_chunk(av, p, size);
3244       }
3245     }
3246     else {
3247 #if HAVE_MMAP
3248       int ret;
3249       INTERNAL_SIZE_T offset = p->prev_size;
3250       av->n_mmaps--;
3251       av->mmapped_mem -= (size + offset);
3252       ret = munmap((char*)p - offset, size + offset);
3253       /* munmap returns non-zero on failure */
3254       assert(ret == 0);
3255 #endif
3256     }
3257   }
3258 }
3259 
3260 /*
3261   ------------------------- malloc_consolidate -------------------------
3262 
3263   malloc_consolidate tears down chunks held in fastbins.
3264 */
3265 
3266 static void malloc_consolidate(mstate av) {
3267   int i;
3268   clear_fastchunks(av);
3269 
3270   for (i = 0; i < NFASTBINS; ++i) {
3271     mfastbinptr* fb = &(av->fastbins[i]);
3272     mchunkptr p = *fb;
3273 
3274     if (p != 0) {
3275       *fb = 0;
3276       do {
3277         mchunkptr nextp = p->fd;
3278         INTERNAL_SIZE_T rawsize = p->size;
3279         CHUNK_SIZE_T size = chunksize(p);
3280         mchunkptr nextchunk = chunk_at_offset(p, size);
3281         CHUNK_SIZE_T nextsize;
3282 
3283         if ((rawsize & PREV_INUSE) == 0) {
3284           CHUNK_SIZE_T prevsize = p->prev_size;
3285           size += prevsize;
3286           p = chunk_at_offset(p, -((long) prevsize));
3287           unlink_chunk(av, p, prevsize);
3288         }
3289 
3290         nextsize = chunksize(nextchunk);
3291         if (nextchunk == av->top) {
3292           size += nextsize;
3293           set_head(p, size | PREV_INUSE);
3294           av->top = p;
3295         }
3296         else {
3297           if (!inuse_bit_at_offset(nextchunk, nextsize)) {
3298             size += nextsize;
3299             unlink_chunk(av, nextchunk, nextsize);
3300           }
3301           else
3302             set_head(nextchunk, nextsize);
3303 
3304           set_head(p, size | PREV_INUSE);
3305           set_foot(p, size);
3306 
3307           insert_chunk(av, p, size);
3308         }
3309         p = nextp;
3310       } while (p != 0);
3311     }
3312   }
3313 }
3314 
3315 
3316 /*
3317   ------------------------------ realloc ------------------------------
3318 */
3319 
3320 
3321 Void_t* rEALLOc(Void_t* oldmem, size_t bytes) {
3322   mstate av = get_malloc_state();
3323 
3324   INTERNAL_SIZE_T  nb;              /* padded request size */
3325 
3326   mchunkptr        oldp;            /* chunk corresponding to oldmem */
3327   CHUNK_SIZE_T     oldsize;         /* its size */
3328 
3329   mchunkptr        newp;            /* chunk to return */
3330   CHUNK_SIZE_T     newsize;         /* its size */
3331   Void_t*          newmem;          /* corresponding user mem */
3332 
3333   mchunkptr        next;            /* next contiguous chunk after oldp */
3334 
3335   mchunkptr        remainder;       /* extra space at end of newp */
3336   CHUNK_SIZE_T     remainder_size;  /* its size */
3337 
3338   CHUNK_SIZE_T     copysize;        /* bytes to copy */
3339   unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
3340   INTERNAL_SIZE_T* s;               /* copy source */
3341   INTERNAL_SIZE_T* d;               /* copy destination */
3342 
3343 
3344 #ifdef REALLOC_ZERO_BYTES_FREES
3345   if (bytes == 0) {
3346     fREe(oldmem);
3347     return 0;
3348   }
3349 #endif
3350 
3351   /* realloc of null is supposed to be same as malloc */
3352   if (oldmem == 0) return mALLOc(bytes);
3353 
3354   checked_request2size(bytes, nb);
3355 
3356   oldp    = mem2chunk(oldmem);
3357   oldsize = chunksize(oldp);
3358 
3359   check_inuse_chunk(oldp);
3360 
3361   if (!chunk_is_mmapped(oldp)) {
3362 
3363     if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
3364       /* already big enough; split below */
3365       newp = oldp;
3366       newsize = oldsize;
3367     }
3368 
3369     else {
3370       next = chunk_at_offset(oldp, oldsize);
3371 
3372       /* Try to expand forward into top */
3373       if (next == av->top &&
3374           (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
3375           (CHUNK_SIZE_T)(nb + MINSIZE)) {
3376         set_head_size(oldp, nb);
3377         av->top = chunk_at_offset(oldp, nb);
3378         set_head(av->top, (newsize - nb) | PREV_INUSE);
3379         return chunk2mem(oldp);
3380       }
3381 
3382       /* Try to expand forward into next chunk;  split off remainder below */
3383       else if (next != av->top &&
3384                !inuse(next) &&
3385                (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
3386                (CHUNK_SIZE_T)(nb)) {
3387         newp = oldp;
3388         unlink_chunk(av, next, chunksize(next));
3389       }
3390 
3391       /* allocate, copy, free */
3392       else {
3393         newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
3394         if (newmem == 0)
3395           return 0; /* propagate failure */
3396 
3397         newp = mem2chunk(newmem);
3398         newsize = chunksize(newp);
3399 
3400         /*
3401           Avoid copy if newp is next chunk after oldp.
3402         */
3403         if (newp == next) {
3404           newsize += oldsize;
3405           newp = oldp;
3406         }
3407         else {
3408           /*
3409             Unroll copy of <= 36 bytes (72 if 8-byte sizes).
3410             We know that contents have an odd number of
3411             INTERNAL_SIZE_T-sized words; minimally 3.
3412           */
3413 
3414           copysize = oldsize - SIZE_SZ;
3415           s = (INTERNAL_SIZE_T*)(oldmem);
3416           d = (INTERNAL_SIZE_T*)(newmem);
3417           ncopies = copysize / sizeof(INTERNAL_SIZE_T);
3418           assert(ncopies >= 3);
3419 
3420           if (ncopies > 9)
3421             MALLOC_COPY(d, s, copysize);
3422 
3423           else {
3424             *(d+0) = *(s+0);
3425             *(d+1) = *(s+1);
3426             *(d+2) = *(s+2);
3427             if (ncopies > 4) {
3428               *(d+3) = *(s+3);
3429               *(d+4) = *(s+4);
3430               if (ncopies > 6) {
3431                 *(d+5) = *(s+5);
3432                 *(d+6) = *(s+6);
3433                 if (ncopies > 8) {
3434                   *(d+7) = *(s+7);
3435                   *(d+8) = *(s+8);
3436                 }
3437               }
3438             }
3439           }
3440 
3441           fREe(oldmem);
3442           check_inuse_chunk(newp);
3443           return chunk2mem(newp);
3444         }
3445       }
3446     }
3447 
3448     /* If possible, free extra space in old or extended chunk */
3449 
3450     assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
3451 
3452     remainder_size = newsize - nb;
3453 
3454     if (remainder_size < MINSIZE) { /* not enough extra to split off */
3455       set_head_size(newp, newsize);
3456       set_inuse_bit_at_offset(newp, newsize);
3457     }
3458     else { /* split remainder */
3459       remainder = chunk_at_offset(newp, nb);
3460       set_head_size(newp, nb);
3461       set_head(remainder, remainder_size | PREV_INUSE);
3462       /* Mark remainder as inuse so free() won't complain */
3463       set_inuse_bit_at_offset(remainder, remainder_size);
3464       fREe(chunk2mem(remainder));
3465     }
3466 
3467     check_inuse_chunk(newp);
3468     return chunk2mem(newp);
3469   }
3470 
3471   /*
3472     Handle mmap cases
3473   */
3474 
3475   else {
3476 #if HAVE_MMAP
3477 
3478 #if HAVE_MREMAP
3479     INTERNAL_SIZE_T offset = oldp->prev_size;
3480     size_t pagemask = av->pagesize - 1;
3481     char *cp;
3482     CHUNK_SIZE_T  sum;
3483 
3484     /* Note the extra SIZE_SZ overhead */
3485     newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
3486 
3487     /* don't need to remap if still within same page */
3488     if (oldsize == newsize - offset)
3489       return oldmem;
3490 
3491     cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
3492 
3493     if (cp != (char*)MORECORE_FAILURE) {
3494 
3495       newp = (mchunkptr)(cp + offset);
3496       set_head(newp, (newsize - offset)|IS_MMAPPED);
3497 
3498       assert(aligned_OK(chunk2mem(newp)));
3499       assert((newp->prev_size == offset));
3500 
3501       /* update statistics */
3502       sum = av->mmapped_mem += newsize - oldsize;
3503       if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3504         av->max_mmapped_mem = sum;
3505       sum += av->sbrked_mem;
3506       if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3507         av->max_total_mem = sum;
3508 
3509       return chunk2mem(newp);
3510     }
3511 #endif
3512 
3513     /* Note the extra SIZE_SZ overhead. */
3514     if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
3515       newmem = oldmem; /* do nothing */
3516     else {
3517       /* Must alloc, copy, free. */
3518       newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
3519       if (newmem != 0) {
3520         MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3521         fREe(oldmem);
3522       }
3523     }
3524     return newmem;
3525 
3526 #else
3527     /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
3528     check_malloc_state(av);
3529     MALLOC_FAILURE_ACTION;
3530     return 0;
3531 #endif
3532   }
3533 }
3534 
3535 /*
3536   ------------------------------ memalign ------------------------------
3537 */
3538 
3539 Void_t* mEMALIGn(size_t alignment, size_t bytes) {
3540   INTERNAL_SIZE_T nb;             /* padded  request size */
3541   char*           m;              /* memory returned by malloc call */
3542   mchunkptr       p;              /* corresponding chunk */
3543   char*           brk;            /* alignment point within p */
3544   mchunkptr       newp;           /* chunk to return */
3545   INTERNAL_SIZE_T newsize;        /* its size */
3546   INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
3547   mchunkptr       remainder;      /* spare room at end to split off */
3548   CHUNK_SIZE_T    remainder_size; /* its size */
3549   INTERNAL_SIZE_T size;
3550 
3551   /* If need less alignment than we give anyway, just relay to malloc */
3552 
3553   if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
3554 
3555   /* Otherwise, ensure that it is at least a minimum chunk size */
3556 
3557   if (alignment <  MINSIZE) alignment = MINSIZE;
3558 
3559   /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
3560   if ((alignment & (alignment - 1)) != 0) {
3561     size_t a = MALLOC_ALIGNMENT * 2;
3562     while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
3563     alignment = a;
3564   }
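
  /*
    Worked example (illustrative): a requested alignment of 24 is not a
    power of two, so the loop above starts at MALLOC_ALIGNMENT * 2
    (16 with the common 8-byte MALLOC_ALIGNMENT) and doubles to 32,
    which becomes the alignment actually used.
  */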
3565 
3566   checked_request2size(bytes, nb);
3567 
3568   /*
3569     Strategy: find a spot within that chunk that meets the alignment
3570     request, and then possibly free the leading and trailing space.
3571   */
3572 
3573 
3574   /* Call malloc with worst case padding to hit alignment. */
3575 
3576   m  = (char*)(mALLOc(nb + alignment + MINSIZE));
3577 
3578   if (m == 0) return 0; /* propagate failure */
3579 
3580   p = mem2chunk(m);
3581 
3582   if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
3583 
3584     /*
3585       Find an aligned spot inside chunk.  Since we need to give back
3586       leading space in a chunk of at least MINSIZE, if the first
3587       calculation places us at a spot with less than MINSIZE leader,
3588       we can move to the next aligned spot -- we've allocated enough
3589       total room so that this is always possible.
3590     */
3591 
3592     brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
3593                            -((signed long) alignment)));
3594     if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
3595       brk += alignment;
3596 
3597     newp = (mchunkptr)brk;
3598     leadsize = brk - (char*)(p);
3599     newsize = chunksize(p) - leadsize;
3600 
3601     /* For mmapped chunks, just adjust offset */
3602     if (chunk_is_mmapped(p)) {
3603       newp->prev_size = p->prev_size + leadsize;
3604       set_head(newp, newsize|IS_MMAPPED);
3605       return chunk2mem(newp);
3606     }
3607 
3608     /* Otherwise, give back leader, use the rest */
3609     set_head(newp, newsize | PREV_INUSE);
3610     set_inuse_bit_at_offset(newp, newsize);
3611     set_head_size(p, leadsize);
3612     fREe(chunk2mem(p));
3613     p = newp;
3614 
3615     assert (newsize >= nb &&
3616             (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
3617   }
3618 
3619   /* Also give back spare room at the end */
3620   if (!chunk_is_mmapped(p)) {
3621     size = chunksize(p);
3622     if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
3623       remainder_size = size - nb;
3624       remainder = chunk_at_offset(p, nb);
3625       set_head(remainder, remainder_size | PREV_INUSE);
3626       set_head_size(p, nb);
3627       fREe(chunk2mem(remainder));
3628     }
3629   }
3630 
3631   check_inuse_chunk(p);
3632   return chunk2mem(p);
3633 }
3634 
3635 /*
3636   ------------------------------ calloc ------------------------------
3637 */
3638 
3639 Void_t* cALLOc(size_t n_elements, size_t elem_size) {
3640   Void_t* mem = mALLOc(n_elements * elem_size);
3641 
3642   if (mem != 0) {
3643     mchunkptr p = mem2chunk(mem);
3644     INTERNAL_SIZE_T* d = (INTERNAL_SIZE_T*)mem;
3645 
3646     if (!chunk_is_mmapped(p))
3647     {
3648       /*
3649         Unroll clear of <= 36 bytes (72 if 8-byte sizes).
3650         We know that contents have an odd number of
3651         INTERNAL_SIZE_T-sized words; minimally 3.
3652       */
3653 
3654       CHUNK_SIZE_T clearsize = chunksize(p) - SIZE_SZ;
3655       CHUNK_SIZE_T nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3656       assert(nclears >= 3);
3657 
3658       if (nclears > 9)
3659         MALLOC_ZERO(d, clearsize);
3660 
3661       else {
3662         *(d+0) = 0;
3663         *(d+1) = 0;
3664         *(d+2) = 0;
3665         if (nclears > 4) {
3666           *(d+3) = 0;
3667           *(d+4) = 0;
3668           if (nclears > 6) {
3669             *(d+5) = 0;
3670             *(d+6) = 0;
3671             if (nclears > 8) {
3672               *(d+7) = 0;
3673               *(d+8) = 0;
3674             }
3675           }
3676         }
3677       }
3678     }
3679 #if ! MMAP_CLEARS
3680     else
3681     {
3682       /*
3683         Note the additional SIZE_SZ of overhead in mmapped chunks
        (there is no successor chunk whose prev_size field can be reused).
3684       */
3685       CHUNK_SIZE_T clearsize = chunksize(p) - 2*SIZE_SZ;
3686       MALLOC_ZERO(d, clearsize);
3687     }
3688 #endif
3689   }
3690   return mem;
3691 }
3692 
3693 /*
3694   ------------------------------ cfree ------------------------------
3695 */
3696 
3697 void cFREe(Void_t *mem) {
3698   fREe(mem);
3699 }
3700 
3701 /*
3702   ------------------------- independent_calloc -------------------------
3703 */
3704 
3705 
3706 Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[]) {
3707   size_t sz = elem_size; /* serves as 1-element array */
3708   /* opts arg of 3 means all elements are same size, and should be cleared */
3709   return iALLOc(n_elements, &sz, 3, chunks);
3710 }
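
/*
  A usage sketch for the same-size case (assuming the standard public
  mapping of iCALLOc to independent_calloc and fREe to free; the Node
  type and build_list are illustrative only): allocate n cleared nodes
  in one aggregate call, link them, then release just the pointer array.

    struct Node { int item; struct Node* next; };

    struct Node* build_list(size_t n) {
      size_t i;
      struct Node* first = 0;
      struct Node** pool =
        (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
      if (pool == 0)
        return 0;
      for (i = 0; i < n; ++i) {   // elements arrive zeroed
        pool[i]->next = first;
        first = pool[i];
      }
      free(pool);   // frees only the pointer array, not the elements
      return first;
    }
*/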
3711 
3712 /*
3713   ------------------------- independent_comalloc -------------------------
3714 */
3715 
3716 Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[]) {
3717   return iALLOc(n_elements, sizes, 0, chunks);
3718 }
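
/*
  A usage sketch for the varying-size case (assuming the standard public
  mapping of iCOMALLOc to independent_comalloc; Head, Foot and make_message
  are illustrative only, and <string.h> is assumed): allocate a header, a
  body and a footer as three independently freeable chunks drawn from one
  aggregate block.

    struct Head { int len; };
    struct Foot { int checksum; };

    int make_message(const char* msg) {
      size_t sizes[3];
      void*  chunks[3];
      sizes[0] = sizeof(struct Head);
      sizes[1] = strlen(msg) + 1;
      sizes[2] = sizeof(struct Foot);
      if (independent_comalloc(3, sizes, chunks) == 0)
        return -1;               // propagate failure
      strcpy((char*) chunks[1], msg);
      // ... fill in chunks[0] and chunks[2]; each chunk may later be
      //     freed individually ...
      return 0;
    }
*/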
3719 
3720 
3721 /*
3722   ------------------------------ ialloc ------------------------------
3723   ialloc provides common support for independent_X routines, handling all of
3724   the combinations that can result.
3725 
3726   The opts arg has:
3727     bit 0 set if all elements are same size (using sizes[0])
3728     bit 1 set if elements should be zeroed
3729 */
3730 
3731 
3732 static Void_t** iALLOc(size_t n_elements,
3733                        size_t* sizes,
3734                        int opts,
3735                        Void_t* chunks[]) {
3736   mstate av = get_malloc_state();
3737   INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
3738   INTERNAL_SIZE_T contents_size;  /* total size of elements */
3739   INTERNAL_SIZE_T array_size;     /* request size of pointer array */
3740   Void_t*         mem;            /* malloced aggregate space */
3741   mchunkptr       p;              /* corresponding chunk */
3742   CHUNK_SIZE_T remainder_size; /* remaining bytes while splitting */
3743   Void_t**        marray;         /* either "chunks" or malloced ptr array */
3744   mchunkptr       array_chunk;    /* chunk for malloced ptr array */
3745   unsigned int    mprops;         /* saved flags, to restore after disabling mmap */
3746   CHUNK_SIZE_T size;
3747   size_t          i;
3748 
3749   ensure_initialization(av);
3750 
3751   /* compute array length, if needed */
3752   if (chunks != 0) {
3753     if (n_elements == 0)
3754       return chunks; /* nothing to do */
3755     marray = chunks;
3756     array_size = 0;
3757   }
3758   else {
3759     /* if empty req, must still return chunk representing empty array */
3760     if (n_elements == 0)
3761       return (Void_t**) mALLOc(0);
3762     marray = 0;
3763     array_size = request2size(n_elements * (sizeof(Void_t*)));
3764   }
3765 
3766   /* compute total element size */
3767   if (opts & 0x1) { /* all-same-size */
3768     element_size = request2size(*sizes);
3769     contents_size = n_elements * element_size;
3770   }
3771   else { /* add up all the sizes */
3772     element_size = 0;
3773     contents_size = 0;
3774     for (i = 0; i != n_elements; ++i)
3775       contents_size += request2size(sizes[i]);
3776   }
3777 
3778   /* subtract out alignment bytes from total to minimize overallocation */
3779   size = contents_size + array_size - MALLOC_ALIGN_MASK;
3780 
3781   /*
3782      Allocate the aggregate chunk.
3783      But first disable mmap so malloc won't use it, since
3784      we would not be able to later free/realloc space internal
3785      to a segregated mmap region.
3786  */
3787 
3788   mprops = av->sysctl;   /* save flags before temporarily disabling mmap */
3789   disable_mmap(av);
3790   mem = mALLOc(size);
3791   av->sysctl = mprops; /* reset mmap */
3792   if (mem == 0)
3793     return 0;
3794 
3795   p = mem2chunk(mem);
3796   assert(!chunk_is_mmapped(p));
3797   remainder_size = chunksize(p);
3798 
3799 
3800   if (opts & 0x2) {       /* optionally clear the elements */
3801     MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
3802   }
3803 
3804   /* If not provided, allocate the pointer array as final part of chunk */
3805   if (marray == 0) {
3806     array_chunk = chunk_at_offset(p, contents_size);
3807     marray = (Void_t**) (chunk2mem(array_chunk));
3808     set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
3809     remainder_size = contents_size;
3810   }
3811 
3812   /* split out elements */
3813   for (i = 0; ; ++i) {
3814     marray[i] = chunk2mem(p);
3815     if (i != n_elements-1) {
3816       if (element_size != 0)
3817         size = element_size;
3818       else
3819         size = request2size(sizes[i]);
3820       remainder_size -= size;
3821       set_head(p, size | PREV_INUSE);
3822       p = chunk_at_offset(p, size);
3823     }
3824     else { /* the final element absorbs any overallocation slop */
3825       set_head(p, remainder_size | PREV_INUSE);
3826       break;
3827     }
3828   }
3829 
3830 #if DEBUG
3831   if (marray != chunks) {
3832     /* final element must have exactly exhausted chunk */
3833     if (element_size != 0)
3834       assert(remainder_size == element_size);
3835     else
3836       assert(remainder_size == request2size(sizes[i]));
3837     check_inuse_chunk(mem2chunk(marray));
3838   }
3839 
3840   for (i = 0; i != n_elements; ++i)
3841     check_inuse_chunk(mem2chunk(marray[i]));
3842 #endif
3843 
3844   return marray;
3845 }
3846 
3847 
3848 /*
3849   ------------------------------ valloc ------------------------------
3850 */
3851 
3852 Void_t* vALLOc(size_t bytes) {
3853   mstate av = get_malloc_state();
3854   ensure_initialization(av);
3855   return mEMALIGn(av->pagesize, bytes);
3856 }
3857 
3858 /*
3859   ------------------------------ pvalloc ------------------------------
3860 */
3861 
3862 
3863 Void_t* pVALLOc(size_t bytes) {
3864   mstate av = get_malloc_state();
3865   size_t pagesz;
3866 
3867   ensure_initialization(av);
3868   pagesz = av->pagesize;
3869   return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
3870 }
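
/*
  For example (illustrative, assuming a 4096-byte page): pVALLOc(1) and
  pVALLOc(4096) both become page-aligned requests for 4096 bytes, while
  pVALLOc(4097) is rounded up to 8192 bytes.
*/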
3871 
3872 
3873 /*
3874   ------------------------------ malloc_trim ------------------------------
3875 */
3876 
3877 int mTRIm(size_t pad) {
3878   mstate av = get_malloc_state();
3879   return systrim(av, pad);
3880 }
3881 
3882 
3883 /*
3884   ------------------------- malloc_usable_size -------------------------
3885 */
3886 
3887 size_t mUSABLe(Void_t* mem) {
3888   mchunkptr p;
3889   if (mem != 0) {
3890     p = mem2chunk(mem);
3891     if (chunk_is_mmapped(p))
3892       return chunksize(p) - 2*SIZE_SZ;
3893     else if (inuse(p))
3894       return chunksize(p) - SIZE_SZ;
3895   }
3896   return 0;
3897 }
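
/*
  Illustrative use (assuming the standard public mapping of mUSABLe to
  malloc_usable_size and that <string.h> is available for memset):

    void*  p = malloc(100);
    size_t n = malloc_usable_size(p);  // may be somewhat more than 100
    if (p != 0)
      memset(p, 0, n);                 // all n bytes may safely be used
*/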
3898 
3899 /*
3900   ------------------------------ mallinfo ------------------------------
3901 */
3902 
3903 /*
3904   Recursive helper function for mallinfo
3905 */
3906 
3907 static void count_tree_blocks(tchunkptr t, int* pcount, INTERNAL_SIZE_T* pavail) {
3908   while (t != 0) {
3909     tchunkptr p = t->bk;
3910     do {
3911       (*pcount)++;
3912       *pavail += chunksize(p);
3913       p = p->bk;
3914     } while (p != t);
3915     if (t->child[0] != 0)
3916       count_tree_blocks(t->child[0], pcount, pavail);
3917     t = t->child[1];
3918   }
3919 }
3920 
3921 
3922 
3923 struct mallinfo mALLINFo()
3924 {
3925   mstate av = get_malloc_state();
3926   struct mallinfo mi;
3927   INTERNAL_SIZE_T avail;
3928   INTERNAL_SIZE_T topsize;
3929   int nblocks;
3930   INTERNAL_SIZE_T fastavail;
3931   int nfastblocks;
3932   mchunkptr p;
3933 
3934   if (av->top == 0) {
3935     avail = fastavail = 0;
3936     topsize = 0;
3937     nblocks = nfastblocks = 0;
3938   }
3939   else {
3940     int i;
3941     check_malloc_state(av);
3942 
3943     topsize = chunksize(av->top);
3944     avail = topsize;
3945     nblocks = 1;  /* top always exists */
3946 
3947     /* traverse fastbins */
3948     nfastblocks = 0;
3949     fastavail = 0;
3950 
3951     for (i = 0; i < NFASTBINS; ++i) {
3952       for (p = av->fastbins[i]; p != 0; p = p->fd) {
3953         ++nfastblocks;
3954         fastavail += chunksize(p);
3955       }
3956     }
3957 
3958     avail += fastavail;
3959 
3960     /* traverse small bins */
3961     for (i = 2; i < NBINS; ++i) {
3962       mbinptr b = bin_at(av, i);
3963       mchunkptr p;
3964       for (p = b->bk; p != b; p = p->bk) {
3965         ++nblocks;
3966         avail += chunksize(p);
3967       }
3968     }
3969 
3970     /* traverse tree bins */
3971     for (i = 0; i < NBINS; ++i) {
3972       tchunkptr t = *(tbin_at(av, i));
3973       if (t != 0)
3974         count_tree_blocks(t, &nblocks, &avail);
3975     }
3976   }
3977 
3978   mi.smblks = nfastblocks;
3980   mi.ordblks = nblocks;
3981   mi.fordblks = avail;
3982   mi.uordblks = av->sbrked_mem - avail;
3983   mi.arena = av->sbrked_mem;
3984   mi.hblks = av->n_mmaps;
3985   mi.hblkhd = av->mmapped_mem;
3986   mi.fsmblks = fastavail;
3987   mi.keepcost = topsize;
3988   mi.usmblks = av->max_total_mem;
3989   return mi;
3990 }
3991 
3992 /*
3993   ------------------------------ malloc_stats ------------------------------
3994 */
3995 
3996 void mSTATs() {
3997   struct mallinfo mi = mALLINFo();
3998 
3999 #ifdef WIN32
4000   {
4001     CHUNK_SIZE_T  free, reserved, committed;
4002     vminfo (&free, &reserved, &committed);
4003     fprintf(stderr, "free bytes       = %10lu\n",
4004             free);
4005     fprintf(stderr, "reserved bytes   = %10lu\n",
4006             reserved);
4007     fprintf(stderr, "committed bytes  = %10lu\n",
4008             committed);
4009   }
4010 #endif
4011 
4012 
4013   fprintf(stderr, "max system bytes = %10lu\n",
4014           (CHUNK_SIZE_T)(mi.usmblks));
4015   fprintf(stderr, "system bytes     = %10lu\n",
4016           (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
4017   fprintf(stderr, "in use bytes     = %10lu\n",
4018           (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
4019 
4020 #if 0
4021   fprintf(stderr, "n0     = %10u\n", n0);
4022   fprintf(stderr, "n1     = %10u\n", n1);
4023   fprintf(stderr, "n2     = %10u\n", n2);
4024   fprintf(stderr, "n3     = %10u\n", n3);
4025   fprintf(stderr, "n4     = %10u\n", n4);
4026   fprintf(stderr, "n5     = %10u\n", n5);
4027   fprintf(stderr, "n6     = %10u\n", n6);
4028   fprintf(stderr, "n7     = %10u\n", n7);
4029   fprintf(stderr, "n8     = %10u\n", n8);
4030 #endif
4031 
4032 
4033 #ifdef WIN32
4034   {
4035     CHUNK_SIZE_T  kernel, user;
4036     if (cpuinfo (TRUE, &kernel, &user)) {
4037       fprintf(stderr, "kernel ms        = %10lu\n",
4038               kernel);
4039       fprintf(stderr, "user ms          = %10lu\n",
4040               user);
4041     }
4042   }
4043 #endif
4044 }
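
/*
  On a non-WIN32 build the report written to stderr has this shape
  (the numbers are purely illustrative):

    max system bytes =    1331200
    system bytes     =    1331200
    in use bytes     =    1064576
*/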
4045 
4046 
4047 /*
4048   ------------------------------ mallopt ------------------------------
4049 */
4050 
4051 int mALLOPt(int param_number, int value) {
4052   mstate av = get_malloc_state();
4053 
4054   ensure_initialization(av);
4055 
4056   switch(param_number) {
4057   case M_MXFAST:
4058     malloc_consolidate(av);
4059     if (value >= 0 && value <= MAX_FAST_SIZE) {
4060       set_max_fast(av, value);
4061       return 1;
4062     }
4063     else
4064       return 0;
4065 
4066   case M_TRIM_THRESHOLD:
4067     av->trim_threshold = value;
4068     return 1;
4069 
4070   case M_TOP_PAD:
4071     av->top_pad = value;
4072     return 1;
4073 
4074   case M_MMAP_THRESHOLD:
4075     av->mmap_threshold = value;
4076     return 1;
4077 
4078   case M_MMAP_MAX:
4079 #if !HAVE_MMAP
4080     if (value != 0)
4081       return 0;
4082 #endif
4083     av->n_mmaps_max = value;
4084     return 1;
4085 
4086   default:
4087     return 0;
4088   }
4089 }
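
/*
  Illustrative mallopt calls (the parameter symbols are those handled in
  the switch above; the values shown are examples, not recommendations):

    mallopt(M_MXFAST, 64);                // serve small requests (up to ~64 bytes) from fastbins
    mallopt(M_TRIM_THRESHOLD, 256*1024);  // trim top once it reaches 256KB
    mallopt(M_MMAP_MAX, 0);               // don't service large requests with mmap
*/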
4090 
4091 /* ----------- Routines dealing with system allocation -------------- */
4092 
4093 #if HAVE_MMAP
4094 static mchunkptr mmap_malloc(mstate av, INTERNAL_SIZE_T nb) {
4095   char* mm;                       /* return value from mmap call*/
4096   CHUNK_SIZE_T    sum;            /* for updating stats */
4097   mchunkptr       p;              /* the allocated/returned chunk */
4098   long            size;
4099   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
4100   long            correction;
4101   size_t          pagemask  = av->pagesize - 1;
4102 
4103   /*
4104     Round up size to nearest page.  For mmapped chunks, the overhead
4105     is one SIZE_SZ unit larger than for normal chunks, because there
4106     is no following chunk whose prev_size field could be used.
4107   */
4108   size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
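
  /*
    Worked example (illustrative, assuming 4096-byte pages, 4-byte SIZE_SZ
    and a MALLOC_ALIGN_MASK of 7): nb == 4000 gives 4000 + 4 + 7 = 4011,
    which rounds up to one 4096-byte page; nb == 4100 rounds up to 8192.
  */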
4109 
4110   /* Don't try if size wraps around 0 */
4111   if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
4112 
4113     mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
4114 
4115     if (mm != (char*)(MORECORE_FAILURE)) {
4116 
4117       /*
4118         The offset to the start of the mmapped region is stored
4119         in the prev_size field of the chunk. This allows us to adjust
4120         returned start address to meet alignment requirements here
4121         and in memalign(), and still be able to compute proper
4122         address argument for later munmap in free() and realloc().
4123       */
4124 
4125       front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
4126       if (front_misalign > 0) {
4127         correction = MALLOC_ALIGNMENT - front_misalign;
4128         p = (mchunkptr)(mm + correction);
4129         p->prev_size = correction;
4130         set_head(p, (size - correction) |IS_MMAPPED);
4131       }
4132       else {
4133         p = (mchunkptr)mm;
4134         p->prev_size = 0;
4135         set_head(p, size|IS_MMAPPED);
4136       }
4137 
4138       /* update statistics */
4139 
4140       if (++av->n_mmaps > av->max_n_mmaps)
4141         av->max_n_mmaps = av->n_mmaps;
4142 
4143       sum = av->mmapped_mem += size;
4144       if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4145         av->max_mmapped_mem = sum;
4146       sum += av->sbrked_mem;
4147       if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4148         av->max_total_mem = sum;
4149 
4150       check_chunk(p);
4151 
4152       return p;
4153     }
4154   }
4155   return 0;
4156 }
4157 #endif
4158 
4159 
4160 /*
4161   sysmalloc handles malloc cases requiring more memory from the system.
4162   On entry, it is assumed that av->top does not have enough
4163   space to service request for nb bytes, thus requiring that av->top
4164   be extended or replaced.
4165 */
4166 
4167 static Void_t* sysmalloc(mstate av, CHUNK_SIZE_T nb) {
4168   mchunkptr       old_top;        /* incoming value of av->top */
4169   INTERNAL_SIZE_T old_size;       /* its size */
4170   char*           old_end;        /* its end address */
4171 
4172   long            size;           /* arg to first MORECORE or mmap call */
4173   char*           brk;            /* return value from MORECORE */
4174 
4175   long            correction;     /* arg to 2nd MORECORE call */
4176   char*           snd_brk;        /* 2nd return val */
4177 
4178   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
4179   INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
4180   char*           aligned_brk;    /* aligned offset into brk */
4181 
4182   mchunkptr       p;              /* the allocated/returned chunk */
4183   mchunkptr       remainder;      /* remainder from allocation */
4184   CHUNK_SIZE_T    remainder_size; /* its size */
4185 
4186   CHUNK_SIZE_T    sum;            /* for updating stats */
4187 
4188   size_t          pagemask;
4189 
4190   /*
4191     Initialize av if necessary
4192    */
4193   if (av->top == 0) {
4194     malloc_init_state(av);
4195     /* to allow call solely for initialization */
4196     if (nb == 0)
4197       return 0;
4198   }
4199 
4200 
4201 #if HAVE_MMAP
4202   /*
4203     If have mmap, and the request size meets the mmap threshold, and
4204     the system supports mmap, and there are few enough currently
4205     allocated mmapped regions, try to directly map this request
4206     rather than expanding top.
4207   */
4208 
4209   if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
4210       (av->n_mmaps < av->n_mmaps_max) &&
4211       !mmap_disabled(av)) {
4212     mchunkptr mp = mmap_malloc(av, nb);
4213     if (mp != 0)
4214       return chunk2mem(mp);
4215   }
4216 #endif
4217 
4218 
4219   pagemask = av->pagesize - 1;
4220 
4221   /* Record incoming configuration of top */
4222 
4223   old_top  = av->top;
4224   old_size = chunksize(old_top);
4225   old_end  = (char*)(chunk_at_offset(old_top, old_size));
4226 
4227   brk = snd_brk = (char*)(MORECORE_FAILURE);
4228 
4229   /*
4230      If not the first time through, we require old_size to be
4231      at least MINSIZE and to have prev_inuse set.
4232   */
4233 
4234   assert((old_top == (mchunkptr)(&(av->initial_top)) && old_size == 0) ||
4235          ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
4236           prev_inuse(old_top)));
4237 
4238   /* Precondition: not enough current space to satisfy nb request */
4239   assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
4240 
4241   /* Request enough space for nb + pad + overhead */
4242 
4243   size = nb + av->top_pad + MINSIZE;
4244 
4245   /*
4246     If contiguous, we can subtract out existing space that we hope to
4247     combine with new space. We add it back later only if
4248     we don't actually get contiguous space.
4249   */
4250 
4251   if (contiguous(av))
4252     size -= old_size;
4253 
4254   /*
4255     Round to a multiple of page size.
4256     If MORECORE is not contiguous, this ensures that we only call it
4257     with whole-page arguments.  And if MORECORE is contiguous and
4258     this is not first time through, this preserves page-alignment of
4259     previous calls. Otherwise, we correct to page-align below.
4260   */
4261 
4262   size = (size + pagemask) & ~pagemask;
4263 
4264   /*
4265     Don't try to call MORECORE if argument is so big as to appear
4266     negative. Note that since mmap takes size_t arg, it may succeed
4267     below even if we cannot call MORECORE.
4268   */
4269 
4270   if (size > 0)
4271     brk = (char*)(MORECORE(size));
4272 
4273   /*
4274     If have mmap, try using it as a backup when MORECORE fails or
4275     cannot be used. This is worth doing on systems that have "holes" in
4276     address space, so sbrk cannot extend to give contiguous space, but
4277     space is available elsewhere.  Note that we ignore mmap max count
4278     and threshold limits, since the space will not be used as a
4279     segregated mmap region.
4280   */
4281 
4282 #if HAVE_MMAP
4283   if (brk == (char*)(MORECORE_FAILURE)) {
4284 
4285     /* Cannot merge with old top, so add its size back in */
4286     if (contiguous(av))
4287       size = (size + old_size + pagemask) & ~pagemask;
4288 
4289     /* If we are relying on mmap as backup, then use larger units */
4290     if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
4291       size = MMAP_AS_MORECORE_SIZE;
4292 
4293     /* Don't try if size wraps around 0 */
4294     if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
4295 
4296       brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
4297 
4298       if (brk != (char*)(MORECORE_FAILURE)) {
4299 
4300         /* We do not need, and cannot use, another sbrk call to find end */
4301         snd_brk = brk + size;
4302 
4303         /*
4304            Record that we no longer have a contiguous sbrk region.
4305            After the first time mmap is used as backup, we do not
4306            ever rely on contiguous space since this could incorrectly
4307            bridge regions.
4308         */
4309         set_noncontiguous(av);
4310       }
4311     }
4312   }
4313 #endif
4314 
4315   if (brk != (char*)(MORECORE_FAILURE)) {
4316     av->sbrked_mem += size;
4317 
4318     /*
4319       If MORECORE extends previous space, we can likewise extend top size.
4320     */
4321 
4322     if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
4323       set_head(old_top, (size + old_size) | PREV_INUSE);
4324     }
4325 
4326     /*
4327       Otherwise, make adjustments:
4328 
4329       * If the first time through or noncontiguous, we need to call sbrk
4330         just to find out where the end of memory lies.
4331 
4332       * We need to ensure that all returned chunks from malloc will meet
4333         MALLOC_ALIGNMENT
4334 
4335       * If there was an intervening foreign sbrk, we need to adjust sbrk
4336         request size to account for fact that we will not be able to
4337         combine new space with existing space in old_top.
4338 
4339       * Almost all systems internally allocate whole pages at a time, in
4340         which case we might as well use the whole last page of request.
4341         So we allocate enough more memory to hit a page boundary now,
4342         which in turn causes future contiguous calls to page-align.
4343     */
4344 
4345     else {
4346       front_misalign = 0;
4347       end_misalign = 0;
4348       correction = 0;
4349       aligned_brk = brk;
4350 
4351       /*
4352         If MORECORE returns an address lower than we have seen before,
4353         we know it isn't really contiguous.  This and some subsequent
4354         checks help cope with non-conforming MORECORE functions and
4355         the presence of "foreign" calls to MORECORE from outside of
4356         malloc or by other threads.  We cannot guarantee to detect
4357         these in all cases, but cope with the ones we do detect.
4358       */
4359       if (contiguous(av) && old_size != 0 && brk < old_end) {
4360         set_noncontiguous(av);
4361       }
4362 
4363       /* handle contiguous cases */
4364       if (contiguous(av)) {
4365 
4366         /*
4367            We can tolerate forward non-contiguities here (usually due
4368            to foreign calls) but treat them as part of our space for
4369            stats reporting.
4370         */
4371         if (old_size != 0)
4372           av->sbrked_mem += brk - old_end;
4373 
4374         /* Guarantee alignment of first new chunk made from this space */
4375 
4376         front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
4377         if (front_misalign > 0) {
4378 
4379           /*
4380             Skip over some bytes to arrive at an aligned position.
4381             We don't need to specially mark these wasted front bytes.
4382             They will never be accessed anyway because
4383             prev_inuse of av->top (and any chunk created from its start)
4384             is always true after initialization.
4385           */
4386 
4387           correction = MALLOC_ALIGNMENT - front_misalign;
4388           aligned_brk += correction;
4389         }
4390 
4391         /*
4392           If this isn't adjacent to existing space, then we will not
4393           be able to merge with old_top space, so must add to 2nd request.
4394         */
4395 
4396         correction += old_size;
4397 
4398         /* Extend the end address to hit a page boundary */
4399         end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
4400         correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
4401 
4402         assert(correction >= 0);
4403         snd_brk = (char*)(MORECORE(correction));
4404 
4405         if (snd_brk == (char*)(MORECORE_FAILURE)) {
4406           /*
4407             If can't allocate correction, try to at least find out current
4408             brk.  It might be enough to proceed without failing.
4409           */
4410           correction = 0;
4411           snd_brk = (char*)(MORECORE(0));
4412         }
4413         else if (snd_brk < brk) {
4414           /*
4415             If the second call gives noncontiguous space even though
4416             it says it won't, the only course of action is to ignore
4417             results of second call, and conservatively estimate where
4418             the first call left us. Also set noncontiguous, so this
4419             won't happen again, leaving at most one hole.
4420 
4421             Note that this check is intrinsically incomplete.  Because
4422             MORECORE is allowed to give more space than we ask for,
4423             there is no reliable way to detect a noncontiguity
4424             producing a forward gap for the second call.
4425           */
4426           snd_brk = brk + size;
4427           correction = 0;
4428           set_noncontiguous(av);
4429         }
4430 
4431       }
4432 
4433       /* handle non-contiguous cases */
4434       else {
4435         /* MORECORE/mmap must correctly align */
4436         assert(aligned_OK(chunk2mem(brk)));
4437 
4438         /* Find out current end of memory */
4439         if (snd_brk == (char*)(MORECORE_FAILURE)) {
4440           snd_brk = (char*)(MORECORE(0));
4441           av->sbrked_mem += snd_brk - brk - size;
4442         }
4443       }
4444 
4445       /* Adjust top based on results of second sbrk */
4446       if (snd_brk != (char*)(MORECORE_FAILURE)) {
4447         av->top = (mchunkptr)aligned_brk;
4448         set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
4449         av->sbrked_mem += correction;
4450 
4451         /*
4452           If not the first time through, we either have a
4453           gap due to foreign sbrk or a non-contiguous region.  Insert a
4454           double fencepost at old_top to prevent consolidation with space
4455           we don't own. These fenceposts are artificial chunks that are
4456           marked as inuse and are in any case too small to use.  We need
4457           two to make sizes and alignments work out.
4458         */
4459 
4460         if (old_size != 0) {
4461           /*
4462              Shrink old_top to insert fenceposts, keeping size a
4463              multiple of MALLOC_ALIGNMENT. We know there is at least
4464              enough space in old_top to do this.
4465           */
4466           old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
4467           set_head(old_top, old_size | PREV_INUSE);
4468 
4469           /*
4470             Note that the following assignments completely overwrite
4471             old_top when old_size was previously MINSIZE.  This is
4472             intentional. We need the fencepost, even if old_top
4473             otherwise gets lost.
4474           */
4475           chunk_at_offset(old_top, old_size          )->size =
4476             SIZE_SZ|PREV_INUSE;
4477 
4478           chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
4479             SIZE_SZ|PREV_INUSE;
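
          /*
            Resulting layout at the old top (illustrative):

              [ old_top, shrunk to old_size, PREV_INUSE ]
              [ fencepost, size SIZE_SZ, PREV_INUSE     ]
              [ fencepost, size SIZE_SZ, PREV_INUSE     ]
              [ ... space we do not own ...             ]

            Both fenceposts look like allocated chunks that are too small
            to ever be used, so no coalescing can cross into foreign space.
          */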
4480 
4481           /*
4482              If possible, release the rest, suppressing trimming.
4483           */
4484           if (old_size >= MINSIZE) {
4485             unsigned int mprops = av->sysctl;
4486             disable_trim(av);
4487             fREe(chunk2mem(old_top));
4488             av->sysctl = mprops;
4489           }
4490         }
4491       }
4492     }
4493 
4494     /* Update statistics */
4495     sum = av->sbrked_mem;
4496     if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
4497       av->max_sbrked_mem = sum;
4498 
4499     sum += av->mmapped_mem;
4500     if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4501       av->max_total_mem = sum;
4502 
4503 
4504     /* finally, do the allocation */
4505 
4506     p = av->top;
4507     size = chunksize(p);
4508 
4509     /* check that one of the above allocation paths succeeded */
4510     if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
4511       remainder_size = size - nb;
4512       remainder = chunk_at_offset(p, nb);
4513       av->top = remainder;
4514       set_head(p, nb | PREV_INUSE);
4515       set_head(remainder, remainder_size | PREV_INUSE);
4516       check_malloced_chunk(p, nb);
4517       check_malloc_state(av);
4518       return chunk2mem(p);
4519     }
4520 
4521   }
4522 
4523   /* catch all failure paths */
4524   check_malloc_state(av);
4525   MALLOC_FAILURE_ACTION;
4526   return 0;
4527 }
4528 
4529 
4530 /*
4531   systrim is an inverse of sorts to sysmalloc.  It gives memory back
4532   to the system (via negative arguments to sbrk) if there is unused
4533   memory at the `high' end of the malloc pool. It is called
4534   automatically by free() when top space exceeds the trim
4535   threshold. It is also called by the public malloc_trim routine.  It
4536   returns 1 if it actually released any memory, else 0.
4537 */
4538 
4539 static int systrim(mstate av, size_t pad) {
4540   long  top_size;        /* Amount of top-most memory */
4541   long  extra;           /* Amount to release */
4542   long  released;        /* Amount actually released */
4543   char* current_brk;     /* address returned by pre-check sbrk call */
4544   char* new_brk;         /* address returned by post-check sbrk call */
4545   size_t pagesz;
4546 
4547   ensure_initialization(av);
4548 
4549   if (have_fastchunks(av))
4550     malloc_consolidate(av);
4551 
4552   if (!trim_disabled(av)) {
4553 
4554 #ifndef MORECORE_CANNOT_TRIM
4555 
4556     pagesz = av->pagesize;
4557     top_size = chunksize(av->top);
4558 
4559     /* Release in pagesize units, keeping at least one page */
4560     extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
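
    /*
      For example (illustrative, assuming 4096-byte pages, a MINSIZE of 16
      and pad == 0): a 300KB (307200-byte) top chunk gives
      extra == 74 * 4096 == 303104, which releases all but one page of
      av->top.
    */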
4561 
4562     if (extra > 0) {
4563 
4564       /*
4565         Only proceed if end of memory is where we last set it.
4566         This avoids problems if there were foreign sbrk calls.
4567       */
4568       current_brk = (char*)(MORECORE(0));
4569       if (current_brk == (char*)(av->top) + top_size) {
4570 
4571         /*
4572           Attempt to release memory. We ignore MORECORE return value,
4573           and instead call again to find out where new end of memory is.
4574           This avoids problems if the first call releases less than we asked,
4575           or if failure somehow altered the brk value. (We could still
4576           encounter problems if it altered brk in some very bad way,
4577           but the only thing we can do is adjust anyway, which will cause
4578           some downstream failure.)
4579         */
4580 
4581         MORECORE(-extra);
4582         new_brk = (char*)(MORECORE(0));
4583 
4584         if (new_brk != (char*)MORECORE_FAILURE) {
4585           released = (long)(current_brk - new_brk);
4586 
4587           if (released != 0) {
4588             /* Success. Adjust top. */
4589             av->sbrked_mem -= released;
4590             set_head(av->top, (top_size - released) | PREV_INUSE);
4591             check_malloc_state(av);
4592             return 1;
4593           }
4594         }
4595       }
4596     }
4597 #endif
4598   }
4599   return 0;
4600 }
4601 
4602 
4603 /*
4604   -------------------- Alternative MORECORE functions --------------------
4605 */
4606 
4607 
4608 /*
4609   General Requirements for MORECORE.
4610 
4611   The MORECORE function must have the following properties:
4612 
4613   If MORECORE_CONTIGUOUS is false:
4614 
4615     * MORECORE must allocate in multiples of pagesize. It will
4616       only be called with arguments that are multiples of pagesize.
4617 
4618     * MORECORE(0) must return an address that is at least
4619       MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4620 
4621   else (i.e. If MORECORE_CONTIGUOUS is true):
4622 
4623     * Consecutive calls to MORECORE with positive arguments
4624       return increasing addresses, indicating that space has been
4625       contiguously extended.
4626 
4627     * MORECORE need not allocate in multiples of pagesize.
4628       Calls to MORECORE need not have args of multiples of pagesize.
4629 
4630     * MORECORE need not page-align.
4631 
4632   In either case:
4633 
4634     * MORECORE may allocate more memory than requested. (Or even less,
4635       but this will generally result in a malloc failure.)
4636 
4637     * MORECORE must not allocate memory when given argument zero, but
4638       instead return one past the end address of memory from previous
4639       nonzero call. This malloc does NOT call MORECORE(0)
4640       until at least one call with positive arguments is made, so
4641       the initial value returned is not important.
4642 
4643     * Even though consecutive calls to MORECORE need not return contiguous
4644       addresses, it must be OK for malloc'ed chunks to span multiple
4645       regions in those cases where they do happen to be contiguous.
4646 
4647     * MORECORE need not handle negative arguments -- it may instead
4648       just return MORECORE_FAILURE when given negative arguments.
4649       Negative arguments are always multiples of pagesize. MORECORE
4650       must not misinterpret negative args as large positive unsigned
4651       args. You can suppress all such calls from even occurring by defining
4652       MORECORE_CANNOT_TRIM.
4653 
4654   There is some variation across systems about the type of the
4655   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4656   actually be size_t, because sbrk supports negative args, so it is
4657   normally the signed type of the same width as size_t (sometimes
4658   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
4659   matter though. Internally, we use "long" as arguments, which should
4660   work across all reasonable possibilities.
4661 
4662   Additionally, if MORECORE ever returns failure for a positive
4663   request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
4664   system allocator. This is a useful backup strategy for systems with
4665   holes in address spaces -- in this case sbrk cannot contiguously
4666   expand the heap, but mmap may be able to map noncontiguous space.
4667 
4668   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4669   a function that always returns MORECORE_FAILURE.
4670 
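  For instance, a minimal sketch (the function name is a placeholder,
  not something defined elsewhere in this file):

      static void* failing_morecore (long size)
      {
        return (void *) MORECORE_FAILURE;
      }

      #define MORECORE failing_morecore

  With this definition the sbrk path always reports failure, so every
  request is serviced through mmap (provided HAVE_MMAP is nonzero).
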
4671   Malloc only has limited ability to detect failures of MORECORE
4672   to supply contiguous space when it says it can. In particular,
4673   multithreaded programs that do not use locks may encounter race
4674   conditions across calls to MORECORE that result in gaps
4675   that cannot be detected as such, and subsequent corruption.
4676 
4677   If you are using this malloc with something other than sbrk (or its
4678   emulation) to supply memory regions, you probably want to set
4679   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
4680   allocator kindly contributed for pre-OSX macOS.  It uses virtually
4681   but not necessarily physically contiguous non-paged memory (locked
4682   in, present and won't get swapped out).  You can use it by
4683   uncommenting this section, adding some #includes, and setting up the
4684   appropriate defines above:
4685 
4686       #define MORECORE osMoreCore
4687       #define MORECORE_CONTIGUOUS 0
4688 
4689   There is also a shutdown routine that should somehow be called for
4690   cleanup upon program exit.
4691 
4692   #define MAX_POOL_ENTRIES 100
4693   #define MINIMUM_MORECORE_SIZE  (64 * 1024)
4694   static int next_os_pool;
4695   void *our_os_pools[MAX_POOL_ENTRIES];
4696 
4697   void *osMoreCore(int size)
4698   {
4699     void *ptr = 0;
4700     static void *sbrk_top = 0;
4701 
4702     if (size > 0)
4703     {
4704       if (size < MINIMUM_MORECORE_SIZE)
4705          size = MINIMUM_MORECORE_SIZE;
4706       if (CurrentExecutionLevel() == kTaskLevel)
4707          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4708       if (ptr == 0)
4709       {
4710         return (void *) MORECORE_FAILURE;
4711       }
4712       // save ptrs so they can be freed during cleanup
4713       our_os_pools[next_os_pool] = ptr;
4714       next_os_pool++;
4715       ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4716       sbrk_top = (char *) ptr + size;
4717       return ptr;
4718     }
4719     else if (size < 0)
4720     {
4721       // we don't currently support shrink behavior
4722       return (void *) MORECORE_FAILURE;
4723     }
4724     else
4725     {
4726       return sbrk_top;
4727     }
4728   }
4729 
4730   // cleanup any allocated memory pools
4731   // called as last thing before shutting down driver
4732 
4733   void osCleanupMem(void)
4734   {
4735     void **ptr;
4736 
4737     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4738       if (*ptr)
4739       {
4740          PoolDeallocate(*ptr);
4741          *ptr = 0;
4742       }
4743   }
4744 
4745 */
4746 
4747 
4748 /*
4749   --------------------------------------------------------------
4750 
4751   Emulation of sbrk for win32.
4752   Donated by J. Walter <Walter@GeNeSys-e.de>.
4753   For additional information about this code, and malloc on Win32, see
4754      http://www.genesys-e.de/jwalter/
4755 */
4756 
4757 
4758 #ifdef WIN32
4759 
4760 #ifdef _DEBUG
4761 /* #define TRACE */
4762 #endif
4763 
4764 /* Support for USE_MALLOC_LOCK */
4765 #ifdef USE_MALLOC_LOCK
4766 
4767 /* Wait for spin lock */
4768 static int slwait (int *sl) {
4769     while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
4770 	    Sleep (0);
4771     return 0;
4772 }
4773 
4774 /* Release spin lock */
4775 static int slrelease (int *sl) {
4776     InterlockedExchange (sl, 0);
4777     return 0;
4778 }
4779 
4780 #ifdef NEEDED
4781 /* Spin lock for emulation code */
4782 static int g_sl;
4783 #endif
4784 
4785 #endif /* USE_MALLOC_LOCK */
4786 
4787 /* getpagesize for windows */
4788 static long getpagesize (void) {
4789     static long g_pagesize = 0;
4790     if (! g_pagesize) {
4791         SYSTEM_INFO system_info;
4792         GetSystemInfo (&system_info);
4793         g_pagesize = system_info.dwPageSize;
4794     }
4795     return g_pagesize;
4796 }
4797 static long getregionsize (void) {
4798     static long g_regionsize = 0;
4799     if (! g_regionsize) {
4800         SYSTEM_INFO system_info;
4801         GetSystemInfo (&system_info);
4802         g_regionsize = system_info.dwAllocationGranularity;
4803     }
4804     return g_regionsize;
4805 }
4806 
4807 /* A region list entry */
4808 typedef struct _region_list_entry {
4809     void *top_allocated;
4810     void *top_committed;
4811     void *top_reserved;
4812     long reserve_size;
4813     struct _region_list_entry *previous;
4814 } region_list_entry;
4815 
4816 /* Allocate and link a region entry in the region list */
4817 static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
4818     region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
4819     if (! next)
4820         return FALSE;
4821     next->top_allocated = (char *) base_reserved;
4822     next->top_committed = (char *) base_reserved;
4823     next->top_reserved = (char *) base_reserved + reserve_size;
4824     next->reserve_size = reserve_size;
4825     next->previous = *last;
4826     *last = next;
4827     return TRUE;
4828 }
4829 /* Free and unlink the last region entry from the region list */
region_list_remove(region_list_entry ** last)4830 static int region_list_remove (region_list_entry **last) {
4831     region_list_entry *previous = (*last)->previous;
4832     if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
4833         return FALSE;
4834     *last = previous;
4835     return TRUE;
4836 }
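
/*
  Region entries are allocated from the default process heap (HeapAlloc/
  HeapFree) rather than via malloc, so maintaining the region list never
  re-enters the allocator being emulated.
*/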

#define CEIL(size,to)   (((size)+(to)-1)&~((to)-1))
#define FLOOR(size,to)  ((size)&~((to)-1))
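
/*
  Both macros assume `to' is a power of two.  For example, with a 4096-byte
  granularity:  CEIL(5000, 4096) == 8192 (round up to the next multiple),
  FLOOR(5000, 4096) == 4096 (round down to a multiple), and values that are
  already multiples are returned unchanged.
*/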

#define SBRK_SCALE  0
/* #define SBRK_SCALE  1 */
/* #define SBRK_SCALE  2 */
/* #define SBRK_SCALE  4 */

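/*
  SBRK_SCALE widens the granularity used below: commit sizes are rounded up
  to multiples of (pagesize << SBRK_SCALE) and reserve sizes to multiples of
  (regionsize << SBRK_SCALE).  A nonzero scale trades extra committed and
  reserved space for fewer VirtualAlloc calls; the default of 0 uses the
  system sizes as-is.
*/
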
/* sbrk for windows */
static void *sbrk (long size) {
    static long g_pagesize, g_my_pagesize;
    static long g_regionsize, g_my_regionsize;
    static region_list_entry *g_last;
    void *result = (void *) MORECORE_FAILURE;
#ifdef TRACE
    printf ("sbrk %d\n", size);
#endif
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Wait for spin lock */
    slwait (&g_sl);
#endif
    /* First time initialization */
    if (! g_pagesize) {
        g_pagesize = getpagesize ();
        g_my_pagesize = g_pagesize << SBRK_SCALE;
    }
    if (! g_regionsize) {
        g_regionsize = getregionsize ();
        g_my_regionsize = g_regionsize << SBRK_SCALE;
    }
    if (! g_last) {
        if (! region_list_append (&g_last, 0, 0))
            goto sbrk_exit;
    }
    /* Assert invariants */
    assert (g_last);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
            g_last->top_allocated <= g_last->top_committed);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
            g_last->top_committed <= g_last->top_reserved &&
            (unsigned) g_last->top_committed % g_pagesize == 0);
    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
    /* Allocation requested? */
    if (size >= 0) {
        /* Allocation size is the requested size */
        long allocate_size = size;
        /* Compute the size to commit */
        long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
        /* Do we reach the commit limit? */
        if (to_commit > 0) {
            /* Round size to commit */
            long commit_size = CEIL (to_commit, g_my_pagesize);
            /* Compute the size to reserve */
            long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
            /* Do we reach the reserve limit? */
            if (to_reserve > 0) {
                /* Compute the remaining size to commit in the current region */
                long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
                if (remaining_commit_size > 0) {
                    /* Assert preconditions */
                    assert ((unsigned) g_last->top_committed % g_pagesize == 0);
                    assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
                        /* Commit this */
                        void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
                                                             MEM_COMMIT, PAGE_READWRITE);
                        /* Check returned pointer for consistency */
                        if (base_committed != g_last->top_committed)
                            goto sbrk_exit;
                        /* Assert postconditions */
                        assert ((unsigned) base_committed % g_pagesize == 0);
#ifdef TRACE
                        printf ("Commit %p %d\n", base_committed, remaining_commit_size);
#endif
                        /* Adjust the regions commit top */
                        g_last->top_committed = (char *) base_committed + remaining_commit_size;
                    }
                } {
                    /* Now we are going to search and reserve. */
                    int contiguous = -1;
                    int found = FALSE;
                    MEMORY_BASIC_INFORMATION memory_info;
                    void *base_reserved;
                    long reserve_size;
                    do {
                        /* Assume contiguous memory */
                        contiguous = TRUE;
                        /* Round size to reserve */
                        reserve_size = CEIL (to_reserve, g_my_regionsize);
                        /* Start with the current region's top */
                        memory_info.BaseAddress = g_last->top_reserved;
                        /* Assert preconditions */
                        assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
                            /* Assert postconditions */
                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
#ifdef TRACE
                            printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
                                    memory_info.State == MEM_FREE ? "FREE":
                                    (memory_info.State == MEM_RESERVE ? "RESERVED":
                                     (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
#endif
                            /* Region is free, well aligned and big enough: we are done */
                            if (memory_info.State == MEM_FREE &&
                                (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
                                memory_info.RegionSize >= (unsigned) reserve_size) {
                                found = TRUE;
                                break;
                            }
                            /* From now on we can't get contiguous memory! */
                            contiguous = FALSE;
                            /* Recompute size to reserve */
                            reserve_size = CEIL (allocate_size, g_my_regionsize);
                            memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
                            /* Assert preconditions */
                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
                            assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        }
                        /* Search failed? */
                        if (! found)
                            goto sbrk_exit;
                        /* Assert preconditions */
                        assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        /* Try to reserve this */
                        base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
                                                      MEM_RESERVE, PAGE_NOACCESS);
                        if (! base_reserved) {
                            int rc = GetLastError ();
                            if (rc != ERROR_INVALID_ADDRESS)
                                goto sbrk_exit;
                        }
                        /* A null pointer signals (hopefully) a race condition with another thread. */
                        /* In this case, we try again. */
                    } while (! base_reserved);
                    /* Check returned pointer for consistency */
                    if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
                        goto sbrk_exit;
                    /* Assert postconditions */
                    assert ((unsigned) base_reserved % g_regionsize == 0);
#ifdef TRACE
                    printf ("Reserve %p %d\n", base_reserved, reserve_size);
#endif
                    /* Did we get contiguous memory? */
                    if (contiguous) {
                        long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
                        /* Adjust allocation size */
                        allocate_size -= start_size;
                        /* Adjust the regions allocation top */
                        g_last->top_allocated = g_last->top_committed;
                        /* Recompute the size to commit */
                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
                        /* Round size to commit */
                        commit_size = CEIL (to_commit, g_my_pagesize);
                    }
                    /* Append the new region to the list */
                    if (! region_list_append (&g_last, base_reserved, reserve_size))
                        goto sbrk_exit;
                    /* Didn't we get contiguous memory? */
                    if (! contiguous) {
                        /* Recompute the size to commit */
                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
                        /* Round size to commit */
                        commit_size = CEIL (to_commit, g_my_pagesize);
                    }
                }
            }
            /* Assert preconditions */
            assert ((unsigned) g_last->top_committed % g_pagesize == 0);
            assert (0 < commit_size && commit_size % g_pagesize == 0); {
                /* Commit this */
                void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
                                                     MEM_COMMIT, PAGE_READWRITE);
                /* Check returned pointer for consistency */
                if (base_committed != g_last->top_committed)
                    goto sbrk_exit;
                /* Assert postconditions */
                assert ((unsigned) base_committed % g_pagesize == 0);
#ifdef TRACE
                printf ("Commit %p %d\n", base_committed, commit_size);
#endif
                /* Adjust the regions commit top */
                g_last->top_committed = (char *) base_committed + commit_size;
            }
        }
        /* Adjust the regions allocation top */
        g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
        result = (char *) g_last->top_allocated - size;
    /* Deallocation requested? */
    } else if (size < 0) {
        long deallocate_size = - size;
        /* As long as we have a region to release */
        while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
            /* Get the size to release */
            long release_size = g_last->reserve_size;
            /* Get the base address */
            void *base_reserved = (char *) g_last->top_reserved - release_size;
            /* Assert preconditions */
            assert ((unsigned) base_reserved % g_regionsize == 0);
            assert (0 < release_size && release_size % g_regionsize == 0); {
                /* Release this */
                int rc = VirtualFree (base_reserved, 0,
                                      MEM_RELEASE);
                /* Check returned code for consistency */
                if (! rc)
                    goto sbrk_exit;
#ifdef TRACE
                printf ("Release %p %d\n", base_reserved, release_size);
#endif
            }
            /* Adjust deallocation size */
            deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
            /* Remove the old region from the list */
            if (! region_list_remove (&g_last))
                goto sbrk_exit;
        } {
            /* Compute the size to decommit */
            long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
            if (to_decommit >= g_my_pagesize) {
                /* Compute the size to decommit */
                long decommit_size = FLOOR (to_decommit, g_my_pagesize);
                /* Compute the base address */
                void *base_committed = (char *) g_last->top_committed - decommit_size;
                /* Assert preconditions */
                assert ((unsigned) base_committed % g_pagesize == 0);
                assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
                    /* Decommit this */
                    int rc = VirtualFree ((char *) base_committed, decommit_size,
                                          MEM_DECOMMIT);
                    /* Check returned code for consistency */
                    if (! rc)
                        goto sbrk_exit;
#ifdef TRACE
                    printf ("Decommit %p %d\n", base_committed, decommit_size);
#endif
                }
                /* Adjust deallocation size and regions commit and allocate top */
                deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
                g_last->top_committed = base_committed;
                g_last->top_allocated = base_committed;
            }
        }
        /* Adjust regions allocate top */
        g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
        /* Check for underflow */
        if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
            g_last->top_allocated > g_last->top_committed) {
            /* Adjust regions allocate top */
            g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
            goto sbrk_exit;
        }
        result = g_last->top_allocated;
    }
    /* Assert invariants */
    assert (g_last);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
            g_last->top_allocated <= g_last->top_committed);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
            g_last->top_committed <= g_last->top_reserved &&
            (unsigned) g_last->top_committed % g_pagesize == 0);
    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);

sbrk_exit:
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Release spin lock */
    slrelease (&g_sl);
#endif
    return result;
}
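
/*
  Illustrative use of the emulation above (a sketch only; the hypothetical
  sbrk_demo below is not part of the allocator): a positive argument grows
  the break and returns the old break, a negative argument shrinks it, and
  zero returns the current break.

    static void sbrk_demo (void) {
      long pagesize = getpagesize ();
      void *old_brk = sbrk (16 * pagesize);   // grow by 16 pages
      if (old_brk != (void *) MORECORE_FAILURE) {
        void *cur_brk = sbrk (0);             // query current break
        assert ((char *) cur_brk == (char *) old_brk + 16 * pagesize);
        sbrk (-16 * pagesize);                // give the pages back
      }
    }
*/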

/* mmap for windows */
static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
    static long g_pagesize;
    static long g_regionsize;
#ifdef TRACE
    printf ("mmap %d\n", size);
#endif
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Wait for spin lock */
    slwait (&g_sl);
#endif
    /* First time initialization */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Assert preconditions */
    assert ((unsigned) ptr % g_regionsize == 0);
    assert (size % g_pagesize == 0);
    /* Allocate this */
    ptr = VirtualAlloc (ptr, size,
                        MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
    if (! ptr) {
        ptr = (void *) MORECORE_FAILURE;
        goto mmap_exit;
    }
    /* Assert postconditions */
    assert ((unsigned) ptr % g_regionsize == 0);
#ifdef TRACE
    printf ("Commit %p %d\n", ptr, size);
#endif
mmap_exit:
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Release spin lock */
    slrelease (&g_sl);
#endif
    return ptr;
}
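
/*
  The prot, type, handle and arg parameters exist only so call sites can
  keep the unix mmap signature; this emulation ignores them and always
  hands back freshly committed, zero-filled, read/write memory from
  VirtualAlloc.
*/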

/* munmap for windows */
static long munmap (void *ptr, long size) {
    static long g_pagesize;
    static long g_regionsize;
    int rc = MUNMAP_FAILURE;
#ifdef TRACE
    printf ("munmap %p %d\n", ptr, size);
#endif
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Wait for spin lock */
    slwait (&g_sl);
#endif
    /* First time initialization */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Assert preconditions */
    assert ((unsigned) ptr % g_regionsize == 0);
    assert (size % g_pagesize == 0);
    /* Free this */
    if (! VirtualFree (ptr, 0,
                       MEM_RELEASE))
        goto munmap_exit;
    rc = 0;
#ifdef TRACE
    printf ("Release %p %d\n", ptr, size);
#endif
munmap_exit:
#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
    /* Release spin lock */
    slrelease (&g_sl);
#endif
    return rc;
}
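
/*
  VirtualFree with MEM_RELEASE requires a size of 0 and releases the entire
  region that was originally reserved, so the size argument is only used by
  the precondition checks and tracing above; callers must unmap whole
  mmapped regions at once.
*/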

static void vminfo (CHUNK_SIZE_T *free, CHUNK_SIZE_T *reserved, CHUNK_SIZE_T *committed) {
    MEMORY_BASIC_INFORMATION memory_info;
    memory_info.BaseAddress = 0;
    *free = *reserved = *committed = 0;
    while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
        switch (memory_info.State) {
        case MEM_FREE:
            *free += memory_info.RegionSize;
            break;
        case MEM_RESERVE:
            *reserved += memory_info.RegionSize;
            break;
        case MEM_COMMIT:
            *committed += memory_info.RegionSize;
            break;
        }
        memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
    }
}
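
/*
  vminfo walks the whole user address space with VirtualQuery and sums the
  sizes of free, reserved and committed regions.  A hypothetical caller
  might report the totals like this:

    CHUNK_SIZE_T free, reserved, committed;
    vminfo (&free, &reserved, &committed);
    printf ("vm: free=%lu reserved=%lu committed=%lu\n",
            (unsigned long) free, (unsigned long) reserved,
            (unsigned long) committed);
*/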

static int cpuinfo (int whole, CHUNK_SIZE_T *kernel, CHUNK_SIZE_T *user) {
    if (whole) {
        __int64 creation64, exit64, kernel64, user64;
        int rc = GetProcessTimes (GetCurrentProcess (),
                                  (FILETIME *) &creation64,
                                  (FILETIME *) &exit64,
                                  (FILETIME *) &kernel64,
                                  (FILETIME *) &user64);
        if (! rc) {
            *kernel = 0;
            *user = 0;
            return FALSE;
        }
        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
        *user = (CHUNK_SIZE_T) (user64 / 10000);
        return TRUE;
    } else {
        __int64 creation64, exit64, kernel64, user64;
        int rc = GetThreadTimes (GetCurrentThread (),
                                 (FILETIME *) &creation64,
                                 (FILETIME *) &exit64,
                                 (FILETIME *) &kernel64,
                                 (FILETIME *) &user64);
        if (! rc) {
            *kernel = 0;
            *user = 0;
            return FALSE;
        }
        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
        *user = (CHUNK_SIZE_T) (user64 / 10000);
        return TRUE;
    }
}
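
/*
  GetProcessTimes/GetThreadTimes report times as FILETIMEs, i.e. counts of
  100-nanosecond intervals; dividing by 10000 converts them to milliseconds
  before they are returned through *kernel and *user.
*/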

#endif /* WIN32 */

/* ------------------------------------------------------------
History:
    V2.8.0 (not yet released)
      * Use trees for non-small bins
         Also requiring different size->bin algorithm

    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
      * Allow tuning of FIRST_SORTED_BIN_SIZE
      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
      * Better detection and support for non-contiguousness of MORECORE.
        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees. Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
      * Raised default trim and map thresholds to 256K.
      * Fix mmap-related #defines. Thanks to Lubos Lunak.
      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
      * Branch-free bin calculation
      * Default trim and mmap thresholds now 256K.

    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
      * Introduce independent_comalloc and independent_calloc.
        Thanks to Michael Pachos for motivation and help.
      * Make optional .h file available
      * Allow > 2GB requests on 32bit systems.
      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
        and Anonymous.
      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
        helping test this.)
      * memalign: check alignment arg
      * realloc: don't try to shift chunks backwards, since this
        leads to more fragmentation in some programs and doesn't
        seem to help in any others.
      * Collect all cases in malloc requiring system memory into sysmalloc
      * Use mmap as backup to sbrk
      * Place all internal state in malloc_state
      * Introduce fastbins (although similar to 2.5.1)
      * Many minor tunings and cosmetic improvements
      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
      * Include errno.h to support default failure action.

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
          (e.g. WIN32 platforms)
         * Cleanup header file inclusion for WIN32 platforms
         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
           memory allocation routines
         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
           usage of 'assert' in non-WIN32 code
         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
           avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
         (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
          from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
          with gcc & native cc (hp, dec only) allowing
          Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
         structure of old version, but most details differ.)

*/