/* dynamic memory allocation for GNU.
   Copyright (C) 1985, 1987 Free Software Foundation, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 1, or (at your option)
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them.   Help stamp out software-hoarding!  */


/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 *	U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 *	Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MSTATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */
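/* Worked example (an editorial sketch, not part of the original notes):
   a request for 100 bytes, assuming the 8-byte header used on typical
   32-bit machines without rcheck, needs 108 bytes, which rounds up to
   the next power of two, 128 = 2^(4+3); the request is therefore served
   from the 128-byte free list (index 4), leaving 20 bytes unused in the
   block.  The exact overhead depends on the machine and on whether
   rcheck is defined; see struct mhead below.  */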

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 *
#ifdef MSTATS
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
#endif MSTATS
 */
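/* A sketch of the layout the code below actually uses (assuming a
   32-bit machine and no rcheck):

     offset 0: mh_alloc   (ISALLOC / ISFREE / ISMEMALIGN)
     offset 1: mh_index   (index into nextf[])
     offset 2: mh_size    (exact request size, if it fits in 16 bits)
     offset 8: start of the space handed back to the caller

   While the block is on a free list, the word at offset 4 holds the
   link to the next free block instead (see CHAIN below).  */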

#ifdef emacs
/* config.h specifies which kind of system this is.  */
#include "config.h"
#include <signal.h>
#else

/* Determine which kind of system this is.  */
#include <sys/types.h>
#include <signal.h>

#include <string.h>
#define bcopy(s,d,n)	memcpy ((d), (s), (n))
#define bcmp(s1,s2,n)	memcmp ((s1), (s2), (n))
#define bzero(s,n)	memset ((s), 0, (n))

#ifndef SIGTSTP
#ifndef VMS
#ifndef USG
#define USG
#endif
#endif /* not VMS */
#else /* SIGTSTP */
#ifdef SIGIO
#define BSD4_2
#endif /* SIGIO */
#endif /* SIGTSTP */

#endif /* not emacs */

/* Define getpagesize () if the system does not.  */
#include "getpagesize.h"

#ifdef BSD
#ifdef BSD4_1
#include <sys/vlimit.h>		/* warn the user when near the end */
#else /* if 4.2 or newer */
#include <sys/time.h>
#include <sys/resource.h>
#endif /* if 4.2 or newer */
#endif

#ifdef VMS
#include "vlimit.h"
#endif

extern char *start_of_data ();

#ifdef BSD
#ifndef DATA_SEG_BITS
#define start_of_data() &etext
#endif
#endif

#ifndef emacs
#define start_of_data() &etext
#endif

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block.  */

extern char etext;

/* These two are for user programs to look at, when they are interested.  */

unsigned int malloc_sbrk_used;       /* amount of data space used now */
unsigned int malloc_sbrk_unused;     /* amount more we can have */

/* start of data space; can be changed by calling malloc_init */
static char *data_space_start;

#ifdef MSTATS
static int nmalloc[30];
static int nmal, nfre;
#endif /* MSTATS */

/* If range checking is not turned on, all we have is a flag indicating
   whether memory is allocated, an index in nextf[], and a size field; to
   realloc() memory we copy either size bytes or 1<<(index+3) bytes depending
   on whether the former can hold the exact size (given the value of
   'index').  If range checking is on, we always need to know how much space
   is allocated, so the 'size' field is never used. */

struct mhead {
	char     mh_alloc;	/* ISALLOC or ISFREE */
	char     mh_index;	/* index in nextf[] */
/* Remainder are valid only when block is allocated */
	unsigned short mh_size;	/* size, if < 0x10000 */
#ifdef rcheck
	unsigned mh_nbytes;	/* number of bytes allocated */
	int      mh_magic4;	/* should be == MAGIC4 */
#endif /* rcheck */
};
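/* For orientation (an illustrative note, not used by the code): on a
   typical 32-bit machine sizeof (struct mhead) is 4 without rcheck and
   12 with rcheck.  The allocator always rounds this up to a multiple of
   8 via ((sizeof (struct mhead) + 7) & ~7), so the space usable by the
   caller in a 2^(i+3)-byte block is 2^(i+3) minus 8 (or minus 16 with
   rcheck, which additionally reserves EXTRA trailing bytes).  */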

/* Access free-list pointer of a block.
  It is stored at block + 4.
  This is not a field in the mhead structure
  because we want sizeof (struct mhead)
  to describe the overhead for when the block is in use,
  and we do not want the free-list pointer to count in that.  */

#define CHAIN(a) \
  (*(struct mhead **) (sizeof (char *) + (char *) (a)))
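/* For example (illustrative): if p points at a free 32-byte block,
   CHAIN (p) designates the pointer-sized word sizeof (char *) bytes
   past p (4 bytes on a 32-bit machine), so

     CHAIN (p) = nextf[2];   nextf[2] = p;

   pushes the block onto the 32-byte free list without needing any
   space beyond the header itself.  */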

#ifdef rcheck

/* To implement range checking, we write magic values in at the beginning and
   end of each allocated block, and make sure they are undisturbed whenever a
   free or a realloc occurs. */
/* Written in each of the 4 bytes following the block's real space */
#define MAGIC1 0x55
/* Written in the 4 bytes before the block's real space */
#define MAGIC4 0x55555555
#define ASSERT(p) if (!(p)) botch("p"); else
#define EXTRA  4		/* 4 bytes extra for MAGIC1s */
#else
#define ASSERT(p) if (!(p)) abort (); else
#define EXTRA  0
#endif /* rcheck */
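/* Under rcheck an allocated block therefore looks like this (a sketch):

     [ mhead ... mh_magic4 = MAGIC4 ][ n bytes of user data ][ 4 x MAGIC1 ]

   free and realloc verify mh_magic4 and the four trailing MAGIC1 bytes,
   so an overrun or underrun of the user area is caught at the next
   free or realloc of that block.  */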


/* nextf[i] is free list of blocks of size 2**(i + 3)  */

static struct mhead *nextf[30];

/* busy[i] is nonzero while allocation of block size i is in progress.  */

static char busy[30];

/* Number of bytes of writable memory we can expect to be able to get */
static unsigned int lim_data;

/* Level number of warnings already issued.
  0 -- no warnings issued.
  1 -- 75% warning already issued.
  2 -- 85% warning already issued.
*/
static int warnlevel;

/* Function to call to issue a warning;
   0 means don't issue them.  */
static void (*warnfunction) ();

/* nonzero once initial bunch of free blocks made */
static int gotpool;

char *_malloc_base;

static void getpool ();

/* Cause reinitialization based on job parameters;
  also declare where the end of pure storage is. */
void
malloc_init (start, warnfun)
     char *start;
     void (*warnfun) ();
{
  if (start)
    data_space_start = start;
  lim_data = 0;
  warnlevel = 0;
  warnfunction = warnfun;
}
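/* Typical use (an editorial sketch): a program that wants low-memory
   warnings can do

     malloc_init ((char *) 0, my_warning_function);

   where my_warning_function is a hypothetical routine of the caller's
   taking the warning string; passing 0 for START keeps the current
   notion of where the data space begins.  */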

/* Return the maximum size to which MEM can be realloc'd
   without actually requiring copying.  */

int
malloc_usable_size (mem)
     char *mem;
{
  struct mhead *p
    = (struct mhead *) (mem - ((sizeof (struct mhead) + 7) & ~7));
  int blocksize = 8 << p->mh_index;

  return blocksize - sizeof (struct mhead) - EXTRA;
}

static void
morecore (nu)			/* ask system for more memory */
     register int nu;		/* size index to get more of  */
{
  char *sbrk ();
  register char *cp;
  register int nblks;
  register unsigned int siz;
  int oldmask;

#ifdef BSD
#ifndef BSD4_1
  int newmask = -1;
  /* Blocking these signals interferes with debugging, at least on BSD on
     the HP 9000/300.  */
#ifdef SIGTRAP
  newmask &= ~(1 << SIGTRAP);
#endif
#ifdef SIGILL
  newmask &= ~(1 << SIGILL);
#endif
#ifdef SIGTSTP
  newmask &= ~(1 << SIGTSTP);
#endif
#ifdef SIGSTOP
  newmask &= ~(1 << SIGSTOP);
#endif
  oldmask = sigsetmask (newmask);
#endif
#endif

  if (!data_space_start)
    {
      data_space_start = start_of_data ();
    }

  if (lim_data == 0)
    get_lim_data ();

 /* On initial startup, get two blocks of each size up to 1k bytes */
  if (!gotpool)
    { getpool (); getpool (); gotpool = 1; }

  /* Find current end of memory and issue warning if getting near max */

#ifndef VMS
  /* Maximum virtual memory on VMS is difficult to calculate since it
   * depends on several dynamically changing things. Also, alignment
   * isn't that important. That is why much of the code here is ifdef'ed
   * out for VMS systems.
   */
  cp = sbrk (0);
  siz = cp - data_space_start;

  if (warnfunction)
    switch (warnlevel)
      {
      case 0:
	if (siz > (lim_data / 4) * 3)
	  {
	    warnlevel++;
	    (*warnfunction) ("Warning: past 75% of memory limit");
	  }
	break;
      case 1:
	if (siz > (lim_data / 20) * 17)
	  {
	    warnlevel++;
	    (*warnfunction) ("Warning: past 85% of memory limit");
	  }
	break;
      case 2:
	if (siz > (lim_data / 20) * 19)
	  {
	    warnlevel++;
	    (*warnfunction) ("Warning: past 95% of memory limit");
	  }
	break;
      }

  if ((int) cp & 0x3ff)	/* land on 1K boundaries */
    sbrk (1024 - ((int) cp & 0x3ff));
#endif /* not VMS */

 /* Take at least 2k, and figure out how many blocks of the desired size
    we're about to get */
  nblks = 1;
  if ((siz = nu) < 8)
    nblks = 1 << ((siz = 8) - nu);
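  /* Worked example (illustrative): for nu = 4 (128-byte blocks) the
     test above sets siz = 8 and nblks = 1 << (8 - 4) = 16, so the sbrk
     below asks for 1 << (8 + 3) = 2048 bytes, i.e. sixteen 128-byte
     blocks; for nu >= 8 it takes exactly one block of 2^(nu+3) bytes.  */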

  if ((cp = sbrk (1 << (siz + 3))) == (char *) -1)
    {
#ifdef BSD
#ifndef BSD4_1
      sigsetmask (oldmask);
#endif
#endif
      return;			/* no more room! */
    }
  malloc_sbrk_used = siz;
  malloc_sbrk_unused = lim_data - siz;

#ifndef VMS
  if ((int) cp & 7)
    {		/* shouldn't happen, but just in case */
      cp = (char *) (((int) cp + 8) & ~7);
      nblks--;
    }
#endif /* not VMS */

 /* save new header and link the nblks blocks together */
  nextf[nu] = (struct mhead *) cp;
  siz = 1 << (nu + 3);
  while (1)
    {
      ((struct mhead *) cp) -> mh_alloc = ISFREE;
      ((struct mhead *) cp) -> mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN ((struct mhead *) cp) = (struct mhead *) (cp + siz);
      cp += siz;
    }
  CHAIN ((struct mhead *) cp) = 0;

#ifdef BSD
#ifndef BSD4_1
  sigsetmask (oldmask);
#endif
#endif
}

static void
getpool ()
{
  register int nu;
  char * sbrk ();
  register char *cp = sbrk (0);

  if ((int) cp & 0x3ff)	/* land on 1K boundaries */
    sbrk (1024 - ((int) cp & 0x3ff));

  /* Record address of start of space allocated by malloc.  */
  if (_malloc_base == 0)
    _malloc_base = cp;

  /* Get 2k of storage */

  cp = sbrk (04000);
  if (cp == (char *) -1)
    return;

  /* Divide it into an initial 8-word block
     plus one block of size 2**nu for nu = 3 ... 10.  */

  CHAIN (cp) = nextf[0];
  nextf[0] = (struct mhead *) cp;
  ((struct mhead *) cp) -> mh_alloc = ISFREE;
  ((struct mhead *) cp) -> mh_index = 0;
  cp += 8;

  for (nu = 0; nu < 7; nu++)
    {
      CHAIN (cp) = nextf[nu];
      nextf[nu] = (struct mhead *) cp;
      ((struct mhead *) cp) -> mh_alloc = ISFREE;
      ((struct mhead *) cp) -> mh_index = nu;
      cp += 8 << nu;
    }
}

char *
malloc (n)		/* get a block */
     unsigned n;
{
  register struct mhead *p;
  register unsigned int nbytes;
  register int nunits = 0;

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.
     Both the beginning of the header and the beginning of the
     block should be on an eight byte boundary.  */
  nbytes = (n + ((sizeof *p + 7) & ~7) + EXTRA + 7) & ~7;
  {
    register unsigned int   shiftr = (nbytes - 1) >> 2;

    while (shiftr >>= 1)
      nunits++;
  }
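  /* Example of the computation above (illustrative): for n = 100 with
     an 8-byte header and EXTRA = 0, nbytes = (100 + 8 + 7) & ~7 = 112;
     shiftr starts at (112 - 1) >> 2 = 27 and survives four right
     shifts, so nunits = 4 and the request is served from nextf[4],
     whose blocks are 8 << 4 = 128 bytes.  */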

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  /* If there are no blocks of the appropriate size, go get some */
  /* COULD SPLIT UP A LARGER BLOCK HERE ... ACT */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == 0)
    {
      busy[nunits] = 0;
      return 0;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr */
  /* and bomb out on the NEXT allocate of this size block */
  if (p -> mh_alloc != ISFREE || p -> mh_index != nunits)
#ifdef rcheck
    botch ("block on free list clobbered");
#else /* not rcheck */
    abort ();
#endif /* not rcheck */

  /* Fill in the info, and if range checking, set up the magic numbers */
  p -> mh_alloc = ISALLOC;
#ifdef rcheck
  p -> mh_nbytes = n;
  p -> mh_magic4 = MAGIC4;
  {
    /* Get the location n after the beginning of the user's space.  */
    register char *m = (char *) p + ((sizeof *p + 7) & ~7) + n;

    *m++ = MAGIC1, *m++ = MAGIC1, *m++ = MAGIC1, *m = MAGIC1;
  }
#else /* not rcheck */
  p -> mh_size = n;
#endif /* not rcheck */
#ifdef MSTATS
  nmalloc[nunits]++;
  nmal++;
#endif /* MSTATS */
  return (char *) p + ((sizeof *p + 7) & ~7);
}

free (mem)
     char *mem;
{
  register struct mhead *p;
  {
    register char *ap = mem;

    if (ap == 0)
      return;

    p = (struct mhead *) (ap - ((sizeof *p + 7) & ~7));
    if (p -> mh_alloc == ISMEMALIGN)
      {
	ap -= p->mh_size;
	p = (struct mhead *) (ap - ((sizeof *p + 7) & ~7));
      }

#ifndef rcheck
    if (p -> mh_alloc != ISALLOC)
      abort ();

#else /* rcheck */
    if (p -> mh_alloc != ISALLOC)
      {
	if (p -> mh_alloc == ISFREE)
	  botch ("free: Called with already freed block argument\n");
	else
	  botch ("free: Called with bad argument\n");
      }

    ASSERT (p -> mh_magic4 == MAGIC4);
    ap += p -> mh_nbytes;
    ASSERT (*ap++ == MAGIC1); ASSERT (*ap++ == MAGIC1);
    ASSERT (*ap++ == MAGIC1); ASSERT (*ap   == MAGIC1);
#endif /* rcheck */
  }
  {
    register int nunits = p -> mh_index;

    ASSERT (nunits <= 29);
    p -> mh_alloc = ISFREE;

    /* Protect against signal handlers calling malloc.  */
    busy[nunits] = 1;
    /* Put this block on the free list.  */
    CHAIN (p) = nextf[nunits];
    nextf[nunits] = p;
    busy[nunits] = 0;

#ifdef MSTATS
    nmalloc[nunits]--;
    nfre++;
#endif /* MSTATS */
  }
}

char *
realloc (mem, n)
     char *mem;
     register unsigned n;
{
  register struct mhead *p;
  register unsigned int tocopy;
  register unsigned int nbytes;
  register int nunits;

  if (mem == 0)
    return malloc (n);
  p = (struct mhead *) (mem - ((sizeof *p + 7) & ~7));
  nunits = p -> mh_index;
  ASSERT (p -> mh_alloc == ISALLOC);
#ifdef rcheck
  ASSERT (p -> mh_magic4 == MAGIC4);
  {
    register char *m = mem + (tocopy = p -> mh_nbytes);
    ASSERT (*m++ == MAGIC1); ASSERT (*m++ == MAGIC1);
    ASSERT (*m++ == MAGIC1); ASSERT (*m   == MAGIC1);
  }
#else /* not rcheck */
  if (p -> mh_index >= 13)
    tocopy = (1 << (p -> mh_index + 3)) - ((sizeof *p + 7) & ~7);
  else
    tocopy = p -> mh_size;
#endif /* not rcheck */

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = (n + ((sizeof *p + 7) & ~7) + EXTRA + 7) & ~7;
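  /* Illustrative check (figures assume the 8-byte header and EXTRA = 0
     of the non-rcheck configuration): a 60-byte object lives in a
     128-byte block from nextf[4]; growing it to 100 bytes gives
     nbytes = 112, which is still in (4 << 4, 8 << 4] = (64, 128], so
     the same block is kept and only the recorded size changes.
     Growing it to 200 bytes (or shrinking it to 10) fails the test and
     falls through to the allocate-copy-free path below.  */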

  /* If ok, use the same block, just marking its size as changed.  */
  if (nbytes > (4 << nunits) && nbytes <= (8 << nunits))
    {
#ifdef rcheck
      register char *m = mem + tocopy;
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      p -> mh_nbytes = n;
      m = mem + n;
      *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;
#else /* not rcheck */
      p -> mh_size = n;
#endif /* not rcheck */
      return mem;
    }

  if (n < tocopy)
    tocopy = n;
  {
    register char *new;

    if ((new = malloc (n)) == 0)
      return 0;
    bcopy (mem, new, tocopy);
    free (mem);
    return new;
  }
}

/* This is in case something linked with Emacs calls calloc.  */

char *
calloc (num, size)
     unsigned num, size;
{
  register char *mem;

  num *= size;
  mem = malloc (num);
  if (mem != 0)
    bzero (mem, num);
  return mem;
}

#ifndef VMS

char *
memalign (alignment, size)
     unsigned alignment, size;
{
  register char *ptr = malloc (size + alignment);
  register char *aligned;
  register struct mhead *p;

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it.  */
  if (((int) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment.  */
  aligned = (char *) (((int) ptr + alignment - 1) & -alignment);
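  /* For example (illustrative): with alignment = 256 and ptr = 0x1234,
     aligned becomes (0x1234 + 0xff) & ~0xff = 0x1300, i.e. the first
     256-byte boundary inside the over-sized block obtained above.  */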

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it.  */
  p = (struct mhead *) (aligned - ((7 + sizeof (struct mhead)) & ~7));
  p -> mh_size = aligned - ptr;
  p -> mh_alloc = ISMEMALIGN;
  return aligned;
}

#ifndef HPUX
/* This runs into trouble with getpagesize on HPUX.
   Patching out seems cleaner than the ugly fix needed.  */
char *
valloc (size)
{
  return memalign (getpagesize (), size);
}
#endif /* not HPUX */
#endif /* not VMS */

#ifdef MSTATS
/* Return statistics describing allocation of blocks of size 2**n. */

struct mstats_value
  {
    int blocksize;
    int nfree;
    int nused;
  };
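/* Usage sketch (illustrative): with MSTATS defined,

     struct mstats_value v = malloc_stats (4);

   reports on the 128-byte class: v.blocksize is 128, v.nused is the
   number of such blocks currently allocated (mallocs minus frees), and
   v.nfree is the number sitting on nextf[4].  */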

struct mstats_value
malloc_stats (size)
     int size;
{
  struct mstats_value v;
  register int i;
  register struct mhead *p;

  v.nfree = 0;

  if (size < 0 || size >= 30)
    {
      v.blocksize = 0;
      v.nused = 0;
      return v;
    }

  v.blocksize = 1 << (size + 3);
  v.nused = nmalloc[size];

  for (p = nextf[size]; p; p = CHAIN (p))
    v.nfree++;

  return v;
}

int
malloc_mem_used ()
{
  int i;
  int size_used;

  size_used = 0;

  for (i = 0; i < 30; i++)
    {
      int allocation_size = 1 << (i + 3);
      struct mhead *p;

      size_used += nmalloc[i] * allocation_size;
    }

  return size_used;
}

int
malloc_mem_free ()
{
  int i;
  int size_unused;

  size_unused = 0;

  for (i = 0; i < 30; i++)
    {
      int allocation_size = 1 << (i + 3);
      struct mhead *p;

      for (p = nextf[i]; p ; p = CHAIN (p))
	size_unused += allocation_size;
    }

  return size_unused;
}
#endif /* MSTATS */

/*
 *	This function returns the total number of bytes that the process
 *	will be allowed to allocate via the sbrk(2) system call.  On
 *	BSD systems this is the total space allocatable to stack and
 *	data.  On USG systems this is the data space only.
 */

#ifdef USG

get_lim_data ()
{
  extern long ulimit ();

#ifdef ULIMIT_BREAK_VALUE
  lim_data = ULIMIT_BREAK_VALUE;
#else
  lim_data = ulimit (3, 0);
#endif

  lim_data -= (long) data_space_start;
}

#else /* not USG */
#if defined (BSD4_1) || defined (VMS)

get_lim_data ()
{
  lim_data = vlimit (LIM_DATA, -1);
}

#else /* not BSD4_1 and not VMS */

get_lim_data ()
{
  struct rlimit XXrlimit;

  getrlimit (RLIMIT_DATA, &XXrlimit);
#ifdef RLIM_INFINITY
  lim_data = XXrlimit.rlim_cur & RLIM_INFINITY; /* soft limit */
#else
  lim_data = XXrlimit.rlim_cur;	/* soft limit */
#endif
}

#endif /* not BSD4_1 and not VMS */
#endif /* not USG */

#ifdef VMS
/* There is a problem when dumping and restoring things on VMS. Calls
 * to SBRK don't necessarily result in contiguous allocation. Dumping
 * doesn't work when it isn't. Therefore, we make the initial
 * allocation contiguous by allocating a big chunk, and do SBRKs from
 * there. Once Emacs has dumped there is no reason to continue
 * contiguous allocation, malloc doesn't depend on it.
 *
 * There is a further problem of using brk and sbrk while using VMS C
 * run time library routines malloc, calloc, etc. The documentation
 * says that this is a no-no, although I'm not sure why this would be
 * a problem. In any case, we remove the necessity to call brk and
 * sbrk, by calling calloc (to assure zero filled data) rather than
 * sbrk.
 *
 * VMS_ALLOCATION_SIZE is the size of the allocation array. This
 * should be larger than the malloc size before dumping. Making this
 * too large will result in the startup procedure slowing down since
 * it will require more space and time to map it in.
 *
 * The value for VMS_ALLOCATION_SIZE in the following define was determined
 * by running emacs linked (and a large allocation) with the debugger and
 * looking to see how much storage was used. The allocation was 201 pages,
 * so I rounded it up to a power of two.
 */
#ifndef VMS_ALLOCATION_SIZE
#define VMS_ALLOCATION_SIZE	(512*256)
#endif

/* Use VMS RTL definitions */
#undef sbrk
#undef brk
#undef malloc
int vms_out_initial = 0;
char vms_initial_buffer[VMS_ALLOCATION_SIZE];
static char *vms_current_brk = &vms_initial_buffer;
static char *vms_end_brk = &vms_initial_buffer[VMS_ALLOCATION_SIZE-1];

#include <stdio.h>

char *
sys_sbrk (incr)
     int incr;
{
  char *sbrk(), *temp, *ptr;

  if (vms_out_initial)
    {
      /* out of initial allocation... */
      if (!(temp = malloc (incr)))
	temp = (char *) -1;
    }
  else
    {
      /* otherwise, go out of our area */
      ptr = vms_current_brk + incr; /* new current_brk */
      if (ptr <= vms_end_brk)
	{
	  temp = vms_current_brk;
	  vms_current_brk = ptr;
	}
      else
	{
	  vms_out_initial = 1;	/* mark as out of initial allocation */
	  if (!(temp = malloc (incr)))
	    temp = (char *) -1;
	}
    }
  return temp;
}
#endif /* VMS */