/* malloc.c - dynamic memory allocation for bash. */

/*  Copyright (C) 1985-2020 Free Software Foundation, Inc.

    This file is part of GNU Bash, the Bourne-Again SHell.

   Bash is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bash is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bash.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 *	U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 *	Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * [VERY] old explanation:
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 */
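
/* Illustrative sketch (not compiled, and not part of the allocator): how a
   request size maps onto the bucket progression described above.  The
   helper name is hypothetical; internal_malloc below does the real scan
   against the binsizes[] table. */
#if 0
static int
example_bucket_index (unsigned long nbytes)
{
  int i;

  /* bucket 0 holds 8-byte blocks, bucket 1 16-byte blocks, and so on */
  for (i = 0; i < NBUCKETS; i++)
    if (nbytes <= binsizes[i])
      return i;
  return -1;		/* too large for any bucket */
}
#endif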

/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */

#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bashtypes.h"
#  include "stdc.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is.  */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif
#include <errno.h>
#include <stdio.h>

#if !defined (botch)
#include <stdlib.h>
#endif

#if defined (HAVE_MMAP)
#include <sys/mman.h>
#endif

/* Define getpagesize () if the system does not.  */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
#  include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
#ifdef MALLOC_WATCH
#  include "watch.h"
#endif

#ifdef powerof2
#  undef powerof2
#endif
/* Could also use (((x) & -(x)) == (x)) */
#define powerof2(x)	((((x) - 1) & (x)) == 0)
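
/* A quick sanity sketch of the two equivalent power-of-2 tests.  Note that
   both accept 0; posix_memalign below rules that case out separately. */
#if 0
  powerof2 (64);		/* 1: (64 - 1) & 64 == 0 */
  powerof2 (24);		/* 0: 23 & 24 == 16 */
  ((24 & -24) == 24);		/* 0: 24 & -24 isolates the low bit, 8 */
#endif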

/* System-specific omissions. */
#ifdef HPUX
#  define NO_VALLOC
#endif

/* SIZEOF_LONG * 4 - 2, usable bins from 1..NBUCKETS-1 */
#define NBUCKETS	30

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block.  */


/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
#if SIZEOF_CHAR_P == 8
  bits64_t mh_align[2];						/* 16 */
#else
  bits64_t mh_align;						/* 8 */
#endif
  struct {
    char mi_alloc; 		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
#if SIZEOF_CHAR_P == 8
    char mi_magic8[8];		/* MAGIC1 guard bytes */	/* 8 */
#endif
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define mh_magic8	minfo.mi_magic8

#define MOVERHEAD	sizeof(union mhead)

#if SIZEOF_CHAR_P == 8
#define MALIGN_MASK	15
#else
#define MALIGN_MASK	7	/* one less than desired alignment */
#endif

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;

/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that.  */

/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
   the rest of the struct. This may need adjusting. */
#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
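
/* Sketch of walking a bucket's free list with CHAIN, for some bucket index
   I, as bcoalesce() and malloc_free_blocks() do below; the link lives
   inside the free block itself, so it adds no overhead while the block is
   allocated. */
#if 0
  {
    union mhead *mp;
    int nfree = 0;

    for (mp = nextf[i]; mp; mp = CHAIN (mp))
      nfree++;
  }
#endif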

/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
#define MAGIC1 0x55
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */
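
/* Sketch of how the MSLOP trailer is written; internal_malloc and
   internal_free below contain the real copies of this code.  The requested
   size doubles as the end guard, copied a byte at a time because the
   trailer is not necessarily aligned.  The helper name is hypothetical. */
#if 0
static void
example_write_end_guard (union mhead *p, u_bits32_t n)
{
  mguard_t mg;
  char *m, *z;

  mg.i = n;			/* requested size, reused as guard value */
  z = mg.s;
  m = (char *) (p + 1) + n;	/* first byte past the user region */
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
}
#endif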

/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 2*SIZEOF_CHAR_P after accounting for
   malloc overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
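
/* Worked example, assuming a 64-bit build (MOVERHEAD == 16, MSLOP == 4,
   MALIGN_MASK == 15): ALLOCATED_BYTES(100) == (100 + 16 + 4 + 15) & ~15
   == 135 & ~15 == 128, which lands exactly in the 128-byte bucket. */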

#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)

/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */
#define SPLIT_MID	11
#define SPLIT_MAX	14

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1

/* Should we use mmap for large allocations? */
#if defined (HAVE_MMAP)
#  if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
#    define MAP_ANONYMOUS MAP_ANON
#  endif
#endif

#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
#  define USE_MMAP
#endif

#if defined (USE_MMAP)
#  define MMAP_THRESHOLD	14	/* must be >= SPLIT_MAX, COMBINE_MAX */
#else
#  define MMAP_THRESHOLD	(8 * SIZEOF_LONG)
#endif

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

/* Future use. */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08

/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
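
/* Example: for nbytes == 128, RIGHT_BUCKET(128, 4) holds because
   binsizes[3] == 64 < 128 <= binsizes[4] == 128.  IN_BUCKET(128, 5) holds
   as well, since the allocator may have skipped past a busy bucket and
   handed out a larger block for the same request. */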

/* nextf[i] is free list of blocks of size 2**(i + 3)  */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 3)) */
#define binsize(x)	binsizes[(x)]

#if !defined (errno)
extern int errno;
#endif

/* Declarations for internal functions */
static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
static void internal_free PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
static void internal_cfree PARAMS((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
#endif

#if defined (botch)
extern void botch ();
#else
static void botch PARAMS((const char *, const char *, int));
#endif
static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

#ifdef SHELL
extern int running_trap;
extern int signal_is_trapped PARAMS((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */

/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

/* Use a variable in case we want to dynamically adapt it in the future */
int malloc_mmap_threshold = MMAP_THRESHOLD;

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
#endif

#if !defined (botch)
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
#endif

/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
			file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
}

/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

#ifdef MALLOC_DEBUG
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}

/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}

/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}

void
_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

void
_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
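
/* Sketch of the intended pairing of the two functions above (morecore()
   below is the real caller): block all signals around the free-list
   manipulation, then restore the previous mask. */
#if 0
  {
    sigset_t set, oset;

    _malloc_block_signals (&set, &oset);
    /* ... modify nextf[] and busy[] ... */
    _malloc_unblock_signals (&set, &oset);
  }
#endif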

/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding  */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}

/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
#  if defined (SIGCHLD)
  if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#  else
  if (running_trap || signal_is_trapped (SIGINT))
#  endif
#endif
    {
      _malloc_block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);	/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#if defined (USE_MMAP)
  if (nu > malloc_mmap_threshold)
    {
      mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if ((void *)mp == MAP_FAILED)
	goto morecore_done;
      nextf[nu] = mp;
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      CHAIN (mp) = 0;
#ifdef MALLOC_STATS
      _mstats.nmmap++;
      _mstats.tmmap += sbrk_amt;
#endif
      goto morecore_done;
    }
#endif

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8- or 16-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    _malloc_unblock_signals (&set, &oset);
}

static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

#if SIZEOF_CHAR_P == 8
#define PREPOP_BIN	3
#define PREPOP_SIZE	64
#else
#define PREPOP_BIN	2
#define PREPOP_SIZE	32
#endif

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (3 == 64 bytes) with it.  Add whatever we need to curbrk
	 to make things 64-byte aligned, compute how many 64-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}

static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

#if SIZEOF_CHAR_P == 8
  /* Begin guard */
  MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, 8);
#endif

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

#if defined (MALLOC_DEBUG)
  z = (char *) (p + 1);
  /* Check alignment of returned pointer */
  if ((unsigned long)z & MALIGN_MASK)
    fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
	file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
#endif

  return (PTR_T) (p + 1);
}

static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
  if (malloc_trace || malloc_register || _malloc_nwatch > 0)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("free: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if defined (USE_MMAP)
  if (nunits > malloc_mmap_threshold)
    {
      munmap (p, binsize (nunits));
#if defined (MALLOC_STATS)
      _mstats.nlesscore[nunits]++;
#endif
      goto free_return;
    }
#endif

#if GLIBC21
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == sbrk (0)))
#else
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc.  */
  busy[nunits] = 1;
  /* Put this block on the free list.  */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;		/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}

static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("realloc: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* If we're reallocating to the same size as previously, return now */
  if (n == p->mh_nbytes)
    return mem;

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed.  */
  if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
    {
      /* Compensate for increment above. */
      m -= 4;

      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  /* If we are using mmap and have mremap, we could use it here. */

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}

static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it.  */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment.  */
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it.  */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
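
/* The alignment arithmetic above, worked for alignment == 64: a malloc
   return of 0x1010 rounds up via (0x1010 + 63) & ~63 to 0x1040, and
   mh_nbytes records the 0x30-byte offset so internal_free() can back up
   to the true start.  For powers of 2, ~alignment + 1 == -alignment ==
   ~(alignment - 1). */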

int
posix_memalign (memptr, alignment, size)
     void **memptr;
     size_t alignment, size;
{
  void *mem;

  /* Perform posix-mandated error checking here */
  if ((alignment % sizeof (void *) != 0) || alignment == 0)
    return EINVAL;
  else if (powerof2 (alignment) == 0)
    return EINVAL;

  mem = internal_memalign (alignment, size, (char *)0, 0, 0);
  if (mem != 0)
    {
      *memptr = mem;
      return 0;
    }
  return ENOMEM;
}
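
/* Caller-side sketch of posix_memalign use; note that it returns an errno
   value rather than setting errno, and the result is released with
   free() as usual. */
#if 0
  {
    void *buf;

    if (posix_memalign (&buf, 64, 1024) == 0)
      free (buf);		/* buf was 64-byte aligned */
  }
#endif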

size_t
malloc_usable_size (mem)
     void *mem;
{
  register union mhead *p;
  register char *ap;
  register int maxbytes;

  if ((ap = (char *)mem) == 0)
    return 0;

  /* Find the true start of the memory block to discover which bin */
  p = (union mhead *) ap - 1;
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  /* return 0 if ISFREE */
  if (p->mh_alloc == ISFREE)
    return 0;

  /* Since we use bounds checking, the usable size is the last requested size. */
  return (p->mh_nbytes);
}
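
/* Because mh_nbytes records the caller's request, the usable size reported
   here is the exact request, not the full bucket size.  For example
   (illustrative only): */
#if 0
  {
    char *s = malloc (100);	/* falls in the 128-byte bucket */

    malloc_usable_size (s);	/* returns 100 */
  }
#endif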

#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed.  */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif

#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */

/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif