/******************************** -*- C -*- ****************************
 *
 *	Memory allocation for Smalltalk
 *
 *
 ***********************************************************************/

/***********************************************************************
 *
 * Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
 * Written by Paolo Bonzini.  Ideas based on Mike Haertel's malloc.
 *
 * This file is part of GNU Smalltalk.
 *
 * GNU Smalltalk is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2, or (at your option) any later
 * version.
 *
 * Linking GNU Smalltalk statically or dynamically with other modules is
 * making a combined work based on GNU Smalltalk.  Thus, the terms and
 * conditions of the GNU General Public License cover the whole
 * combination.
 *
 * In addition, as a special exception, the Free Software Foundation
 * gives you permission to combine GNU Smalltalk with free software
 * programs or libraries that are released under the GNU LGPL and with
 * independent programs running under the GNU Smalltalk virtual machine.
 *
 * You may copy and distribute such a system following the terms of the
 * GNU GPL for GNU Smalltalk and the licenses of the other code
 * concerned, provided that you include the source code of that other
 * code when and as the GNU GPL requires distribution of source code.
 *
 * Note that people who make modified versions of GNU Smalltalk are not
 * obligated to grant this special exception for their modified
 * versions; it is their choice whether to do so.  The GNU General
 * Public License gives permission to release a modified version without
 * this exception; this exception also makes it possible to release a
 * modified version which carries forward this exception.
 *
 * GNU Smalltalk is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * GNU Smalltalk; see the file COPYING.  If not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 ***********************************************************************/

#include "gstpriv.h"

#define	SMALL2FREE(B, N)	((heap_freeobj*)(((char *)(B)->vSmall.data) + (N)*(B)->size))

#define	MEM2BLOCK(M)		((heap_block*)(((intptr_t)(M)) & -pagesize))
#define	MEM2FREE(M)		((heap_freeobj*)(M))

#define	BLOCKEND(B)		((heap_block*)(((unsigned char*)(B)) + (B)->size))

#define	MAX_SMALL_OBJ_SIZE	16384
#define	IS_SMALL_SIZE(S)	((S) <= max_small_object_size)

#define	MEMALIGN		8
#define	ROUNDUPALIGN(V)		(((intptr_t)(V) + MEMALIGN - 1) & -MEMALIGN)
#define	ROUNDUPPAGESIZE(V)	(((intptr_t)(V) + pagesize - 1) & -pagesize)
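
/* For illustration (assuming MEMALIGN == 8 and a 4096-byte page,
   which is not guaranteed on every platform):

     ROUNDUPALIGN (13)      == 16      -- next multiple of MEMALIGN
     ROUNDUPALIGN (16)      == 16      -- already aligned
     ROUNDUPPAGESIZE (5000) == 8192    -- next multiple of pagesize

   Adding ALIGN - 1 and masking with -ALIGN rounds up to any
   power-of-two boundary.  */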

#define	OBJECT_SIZE(M)		(MEM2BLOCK(M)->size)

#define MMAP_AREA_SIZE		(sizeof (long) << 26)	/* 256/512 Mb */
#define MMAP_THRESHOLD		(sizeof (long) << 15)	/* 128/256 kb */

/* Depending on the architecture, heap_block->vSmall.data could be
   counted as 1 or 4 bytes.  This formula gets it right.  */
#define offset_of(field, type) \
  (((char *) &( ((type *) 8) -> field )) - (char *) 8)

#define SMALL_OBJ_HEADER_SIZE       offset_of (vSmall.data, heap_block)
#define LARGE_OBJ_HEADER_SIZE       offset_of (vLarge.data, heap_block)
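
/* A minimal sketch of the offset_of trick (struct s below is purely
   illustrative): casting a small non-null constant to type * and
   taking the address of a member yields the member's byte offset
   without dereferencing anything:

     struct s { long a; char b[1]; };
     offset_of (b, struct s)    -- sizeof (long) on typical ABIs

   Taking the member's offset, rather than summing sizeof of the
   preceding fields, avoids miscounting alignment padding, so the
   header sizes above are exact.  */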

static void init_heap (heap_data *h, size_t heap_allocation_size, size_t heap_limit);

#define vSmall var.small
#define vLarge var.large
#define vFree  var.free

static heap_block *heap_small_block (heap_data *h, size_t);
static heap_block *heap_large_block (heap_data *h, size_t);
static void heap_system_alloc (heap_data *h, size_t);

static heap_block *heap_primitive_alloc (heap_data *h, size_t);
static void heap_add_to_free_list (heap_data *h, heap_block *);
static void heap_primitive_free (heap_data *h, heap_block *);
static PTR morecore (size_t);



/* This list was produced by this command

   echo 'for (i = (4072 + 7) / 32; i >= 1; i--) (4072 / i) / 32 * 32; 0' |
     bc | uniq | sed '$!s/$/,/' | fmt -60

   for 32-bit machines, and similarly with 4064 instead of
   4072 for 64-bit machines.  8 and 16 were added manually.  */

static unsigned short freelist_size[NUM_FREELISTS + 1] = {
  8, 16, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352,
  384, 448, 480, 576, 672, 800, 992, 1344, 2016,
  4096 - SMALL_OBJ_HEADER_SIZE,
  8192 - SMALL_OBJ_HEADER_SIZE,
  16384 - SMALL_OBJ_HEADER_SIZE, 0
};
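
/* For example (using the table above): a 100-byte request is rounded
   up by ROUNDUPALIGN to 104 and served from the 128-byte freelist, a
   500-byte request from the 576-byte one; anything larger than
   max_small_object_size bypasses the small-object path entirely.  */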

static unsigned short sztable[MAX_SMALL_OBJ_SIZE + 1];

static heap_block *heap_prim_freelist = NULL;
static size_t max_small_object_size;
static size_t pagesize;


/* Create a new memory heap  */
heap_data *
_gst_mem_new_heap (size_t heap_allocation_size, size_t heap_limit)
{
  heap_data *h = (heap_data *) xcalloc (1, sizeof (*h));
  init_heap (h, heap_allocation_size, heap_limit);
  return h;
}

/* Initialize a memory heap  */
static void
init_heap (heap_data *h, size_t heap_allocation_size, size_t heap_limit)
{
  int sz;
  int i;

  if (!pagesize)
    {
      pagesize = getpagesize ();

      /* Use the preinitialized freelist table to initialize
         the sztable.  */
      for (sz = i = 0; freelist_size[i] > 0 && freelist_size[i] < pagesize; i++)
        for (; sz <= freelist_size[i]; sz++)
          sztable[sz] = i;
      max_small_object_size = sz - 1;
    }

  for (i = 0; freelist_size[i] > 0; i++)
    h->freelist[i] = NULL;

  h->heap_allocation_size = (heap_allocation_size
			     ? ROUNDUPPAGESIZE (heap_allocation_size)
			     : MMAP_THRESHOLD);
  h->heap_limit = heap_limit;
  h->mmap_count = 0;
  h->heap_total = 0;
  h->probes = h->splits = h->matches = h->failures = 0;
  h->after_allocating = NULL;
  h->after_prim_allocating = NULL;
  h->before_prim_freeing = NULL;
  h->nomemory = NULL;
}
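
/* A minimal usage sketch (the sizes are illustrative, not
   prescriptive):

     heap_data *h = _gst_mem_new_heap (0, 64 * 1024 * 1024);
     PTR p = _gst_mem_alloc (h, 200);
     p = _gst_mem_realloc (h, p, 400);
     _gst_mem_free (h, p);

   Passing 0 for heap_allocation_size selects the MMAP_THRESHOLD
   default; a nonzero heap_limit caps heap_total and triggers the
   nomemory callback when an allocation would first exceed it.  */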


/* _gst_mem_alloc
   Allocate a piece of memory.  */
PTR
_gst_mem_alloc (heap_data *h, size_t sz)
{
  size_t lnr;
  heap_freeobj *mem;
  heap_block **mptr;
  heap_block *blk;
  size_t nsz;
  int times;

  times = 0;
rerun:
  times++;
  if (IS_SMALL_SIZE (sz))
    {
      /* Translate size to object free list */
      sz = ROUNDUPALIGN (sz);

      lnr = sztable[sz];
      nsz = freelist_size[lnr];

      /* No available objects?  Allocate some more.  */
      mptr = &h->freelist[lnr];
      blk = *mptr;
      if (!blk)
	{
	  blk = heap_small_block (h, nsz);
	  if (!blk)
	    {
	      nsz = pagesize;
	      goto nospace;
	    }

#ifndef OPTIMIZE
          if (((intptr_t) blk) & (pagesize - 1))
	    abort ();
#endif

	  blk->vSmall.nfree = *mptr;
	  *mptr = blk;
	}

      /* Unlink a free object and return it */
      mem = blk->vSmall.free;

#ifndef OPTIMIZE
      if (!blk->vSmall.free || !blk->vSmall.avail)
	abort ();

      if (((intptr_t) mem <= (intptr_t) blk) ||
	  ((intptr_t) mem >= (intptr_t) blk + pagesize))
	abort ();
#endif

      blk->vSmall.free = mem->next;

      /* Once we use up all the sub-blocks, remove the whole block
         from the freelist.  */
      blk->vSmall.avail--;
      if (!blk->vSmall.free)
	*mptr = blk->vSmall.nfree;
    }

  else
    {
      nsz = sz;
      blk = heap_large_block (h, nsz);
      nsz += LARGE_OBJ_HEADER_SIZE;
      nsz = ROUNDUPPAGESIZE (nsz);
      if (blk == 0)
        goto nospace;

      mem = (heap_freeobj *) blk->vLarge.data;
    }

#ifndef OPTIMIZE
  if (OBJECT_SIZE (mem) < sz)
    abort ();
#endif

  if (h->after_allocating)
    h->after_allocating (h, blk, sz);

  return (mem);

nospace:
  /* Failed to find space in any freelist.  Must try to get the
     memory from somewhere.  */
  switch (times)
    {
    case 1:
      /* Try asking the program to free some memory, but only if
         it's worth doing.  */
      if (h->heap_limit && h->heap_total <= h->heap_limit
	  && h->heap_total + nsz > h->heap_limit && h->nomemory)
	{
	  h = h->nomemory (h, nsz);
	  if (h)
	    break;
	  else
	    return NULL;
	}

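      /* Fall through when the nomemory handler is absent or not
         applicable.  */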
    case 2:
      /* Get from the system */
      if (!h->heap_limit || h->heap_total < h->heap_limit)
        {
	  if (nsz < h->heap_allocation_size)
	    nsz = h->heap_allocation_size;

          heap_system_alloc (h, nsz);
	  h->failures++;
          break;
        }

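      /* Fall through when the heap limit has been reached.  */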
    default:
      return (NULL);
    }

  /* Try again */
  goto rerun;
}


PTR
_gst_mem_realloc (heap_data *h, PTR mem, size_t size)
{
  heap_block *info;
  int pages_to_free;
  unsigned mmap_block;

  if (mem == NULL)
    return _gst_mem_alloc (h, size);

  if (size == 0)
    {
      _gst_mem_free (h, mem);
      return NULL;
    }

  info = MEM2BLOCK (mem);

  if (size > info->size)
    {
      PTR p;
      p = _gst_mem_alloc (h, size);
      if (!p)
	return NULL;

      memcpy (p, mem, info->size);
      _gst_mem_free (h, mem);
      return p;
    }

  if (IS_SMALL_SIZE (info->size))
    return mem;

  mmap_block = info->mmap_block;
  pages_to_free = (info->size - size) / pagesize;
  if (!pages_to_free)
    return mem;

  info->size -= pages_to_free * pagesize;

  /* Split into a busy and a free block */
  info = (heap_block *) &info->vLarge.data[info->size];
  info->size = pages_to_free * pagesize;
  info->mmap_block = mmap_block;
  heap_primitive_free (h, info);

  return mem;
}
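
/* For illustration, with 4096-byte pages: shrinking a large object by
   at least one full page splits its trailing pages off as a separate
   block and returns them through heap_primitive_free, while a smaller
   shrink (pages_to_free == 0) just returns the same pointer.  Small
   objects are never moved or split on shrink.  */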

/* Free a piece of memory.  */
void
_gst_mem_free (heap_data *h, PTR mem)
{
  heap_block *info;
  heap_freeobj *obj;
  int lnr;
  int msz;

  if (!mem)
    return;

  info = MEM2BLOCK (mem);
  msz = info->size;
  if (IS_SMALL_SIZE (msz))
    {
      lnr = sztable[msz];
      /* If this block contains no free sub-blocks yet, attach
         it to the freelist.  */
      if (++info->vSmall.avail == 1)
	{
#ifndef OPTIMIZE
	  if ( ((intptr_t) info) & (pagesize - 1))
	    abort ();
#endif

	  info->vSmall.nfree = h->freelist[lnr];
	  h->freelist[lnr] = info;
	}
      obj = MEM2FREE (mem);
      obj->next = info->vSmall.free;
      info->vSmall.free = obj;

#ifndef OPTIMIZE
      if ((intptr_t) obj < (intptr_t) info ||
	  (intptr_t) obj >= (intptr_t) info + pagesize ||
	  (intptr_t) obj == (intptr_t) (obj->next))
	abort ();

      if (info->vSmall.avail > info->vSmall.nr)
	abort ();
#endif

      /* If we have freed all the sub-blocks, free the whole block */
      if (info->vSmall.avail == info->vSmall.nr)
	{
	  heap_block **finfo = &h->freelist[lnr];

	  for (;;)
	    {
	      if (*finfo == info)
		{
		  (*finfo) = info->vSmall.nfree;
		  info->size = pagesize;
		  heap_primitive_free (h, info);
		  break;
		}
	      finfo = &(*finfo)->vSmall.nfree;
#ifndef OPTIMIZE
	      if (!*finfo)
		abort ();
#endif
	    }
	}
    }

  else
    {
      /* Calculate the true size of the block */
      msz += LARGE_OBJ_HEADER_SIZE;
      msz = ROUNDUPPAGESIZE (msz);
      info->size = msz;
      h->mmap_count -= info->mmap_block;
      heap_primitive_free (h, info);
    }
}


/* Allocate a new block of memory.  The block will contain 'nr' objects
   each of 'sz' bytes.  */
static heap_block *
heap_small_block (heap_data *h, size_t sz)
{
  heap_block *info;
  int i;
  int nr;
  info = heap_primitive_alloc (h, pagesize);
  if (!info)
    return (NULL);

  /* Calculate the number of objects in this block */
  nr = (pagesize - SMALL_OBJ_HEADER_SIZE) / sz;
  /* Set up the meta-data for the block */
  info->size = sz;
  info->vSmall.nr = nr;
  info->vSmall.avail = nr;

  /* Build the objects into a free list */
  for (i = nr - 1; i >= 0; i--)
    SMALL2FREE (info, i)->next = SMALL2FREE (info, i + 1);

  SMALL2FREE (info, nr - 1)->next = 0;
  info->vSmall.free = SMALL2FREE (info, 0);

  return (info);
}
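
/* For example, with a 4096-byte page and sz == 128 (and assuming the
   small-object header fits in the first 128 bytes), the block holds
   (4096 - SMALL_OBJ_HEADER_SIZE) / 128 == 31 objects, chained so that
   object i points to object i + 1 and the last points to NULL.  */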

/* Allocate a new block of memory.  The block will contain one object */
static heap_block *
heap_large_block (heap_data *h, size_t sz)
{
  heap_block *info;
  size_t msz;
  /* Add in the management overhead */
  msz = sz + LARGE_OBJ_HEADER_SIZE;
  /* Round the size up to a whole number of pages */
  msz = ROUNDUPPAGESIZE (msz);

  info = heap_primitive_alloc (h, msz);
  if (!info)
    return (NULL);

  info->size = msz - LARGE_OBJ_HEADER_SIZE;
  return (info);
}


/* Allocate a block of memory from the free list or, failing that, the
   system pool.  */
static heap_block *
heap_primitive_alloc (heap_data *h, size_t sz)
{
  heap_block *ptr;
  heap_block **pptr;

  /* If we would pass the heap boundary, return NULL to indicate that
     we've run out.  */
  if (h->heap_limit && h->heap_total <= h->heap_limit
      && h->heap_total + sz > h->heap_limit)
    return (NULL);

#ifndef OPTIMIZE
  if (sz & (pagesize - 1))
    abort ();
#endif

  if (sz > MMAP_THRESHOLD)
    {
      ptr = _gst_osmem_alloc (sz);
      if (ptr)
        {
	  if (h->after_prim_allocating)
	    h->after_prim_allocating (h, ptr, sz);

          h->heap_total += sz;
          h->mmap_count++;

	  /* Set up the meta-data for the block */
          ptr->mmap_block = 1;
	  ptr->user = 0;
	  ptr->size = sz;
          if (((intptr_t) ptr) & (pagesize - 1))
	    abort ();

	  return ptr;
	}
    }

  for (pptr = &heap_prim_freelist; (ptr = *pptr); pptr = &(ptr->vFree.next))
    {
      h->probes++;
#ifndef OPTIMIZE
      if (((intptr_t) ptr) & (pagesize - 1))
	abort ();

      if (ptr->size & (pagesize - 1))
	abort ();
#endif

      /* First fit */
      if (sz <= ptr->size)
	{
	  size_t left;
	  /* If there's more than a page left, split it */
	  left = ptr->size - sz;
	  if (left >= pagesize)
	    {
	      heap_block *nptr;
	      ptr->size = sz;
	      nptr = BLOCKEND (ptr);
	      nptr->size = left;
	      nptr->vFree.next = ptr->vFree.next;
	      ptr->vFree.next = nptr;
	      h->splits++;
	    }
	  else
	    h->matches++;

	  *pptr = ptr->vFree.next;

	  ptr->mmap_block = 0;
	  ptr->user = 0;
	  h->heap_total += sz;
	  if (h->after_prim_allocating)
	    h->after_prim_allocating (h, ptr, sz);

	  return (ptr);
	}
    }

  /* Nothing found on the free list */
  return (NULL);
}
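
/* First-fit example: with free blocks of 8192, 4096 and 20480 bytes
   on the list, a 4096-byte request takes the 8192-byte block, splits
   off its trailing 4096 bytes as a new free block in the same list
   position (h->splits++), and leaves 4096, 4096 and 20480 bytes
   free.  */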


/* Return a block of memory to the free list.  */
static void
heap_primitive_free (heap_data *h, heap_block *mem)
{
#ifndef OPTIMIZE
  if (mem->size & (pagesize - 1))
    abort ();
#endif

  if (h->before_prim_freeing)
    h->before_prim_freeing (h, mem, mem->size);

  h->heap_total -= mem->size;
  if (mem->mmap_block)
    {
      _gst_osmem_free (mem, mem->size);
      return;
    }

  heap_add_to_free_list (h, mem);
}

static void
heap_add_to_free_list (heap_data *h, heap_block *mem)
{
  heap_block *lptr;
  heap_block *nptr;

#ifndef OPTIMIZE
  if (((intptr_t) mem) & (pagesize - 1))
    abort ();

  if (mem->size & (pagesize - 1))
    abort ();
#endif

  if (mem < heap_prim_freelist || heap_prim_freelist == 0)
    {
      /* If this block is directly before the first block on the
         freelist, merge it into that block.  Otherwise just
         attach it to the beginning.  */
      if (BLOCKEND (mem) == heap_prim_freelist)
	{
	  mem->size += heap_prim_freelist->size;
	  mem->vFree.next = heap_prim_freelist->vFree.next;
	}
      else
	mem->vFree.next = heap_prim_freelist;

      heap_prim_freelist = mem;
      return;
    }

  /* Search the freelist for the logical place to put this block */
  lptr = heap_prim_freelist;
  while (lptr->vFree.next != 0)
    {
#ifndef OPTIMIZE
      if (lptr->size & (pagesize - 1))
	abort ();
#endif

      nptr = lptr->vFree.next;
      if (mem > lptr && mem < nptr)
	{
	  /* The block goes here in the logical scheme of things.
	     Work out how to merge it with those which come
	     before and after.  */
	  if (BLOCKEND (lptr) == mem)
	    {
	      if (BLOCKEND (mem) == nptr)
		{
		  /* Merge with the last and the next block */
		  lptr->size += mem->size + nptr->size;
		  lptr->vFree.next = nptr->vFree.next;
		}
	      else
		/* Merge with the last but not the next block */
		lptr->size += mem->size;
	    }

	  else
	    {
	      if (BLOCKEND (mem) == nptr)
		{
		  /* Merge with the next but not the last block */
		  mem->size += nptr->size;
		  mem->vFree.next = nptr->vFree.next;
		  lptr->vFree.next = mem;
		}
	      else
		{
		  /* Won't merge with either */
		  mem->vFree.next = nptr;
		  lptr->vFree.next = mem;
		}
	    }
	  return;
	}
      lptr = nptr;
    }

  /* If 'mem' goes directly after the last block, merge it in.
     Otherwise, just add it onto the end of the list.  */
  mem->vFree.next = NULL;
  if (BLOCKEND (lptr) == mem)
    lptr->size += mem->size;
  else
    lptr->vFree.next = mem;
}
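
/* Coalescing example: suppose the address-ordered freelist holds
   blocks A and C, and a block B with BLOCKEND (A) == B and
   BLOCKEND (B) == C is freed.  The walk above finds A < B < C and
   merges all three into a single block at A, so the list stays
   sorted with no two physically adjacent blocks left unmerged.  */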

static void
heap_system_alloc (heap_data *h, size_t sz)
{
  heap_block *mem;
#ifndef OPTIMIZE
  if (sz & (pagesize - 1))
    abort ();
#endif

  mem = (heap_block *) morecore (sz);
  if (!mem)
    nomemory (1);
  mem->mmap_block = 0;
  mem->size = sz;

  /* Free the block into the system pool */
  heap_add_to_free_list (h, mem);
}

PTR
morecore (size_t size)
{
  heap just_allocated_heap = NULL;

  /* _gst_heap_sbrk is actually the same as sbrk as long as
     current_heap is NULL.  But we cannot do that unless we
     can replace malloc (which we cannot do on MacOS X, see above).  */
  static heap current_heap = NULL;

  if (current_heap == NULL)
    {
      just_allocated_heap = _gst_heap_create (NULL, MMAP_AREA_SIZE);
      if (!just_allocated_heap)
	return (NULL);
      current_heap = just_allocated_heap;
    }

  for (;;)
    {
      char *ptr = _gst_heap_sbrk (current_heap, size);

      if (ptr != NULL)
	{
          if (((intptr_t) ptr & (pagesize - 1)) > 0)
            {
	      /* Oops, we have to align to a page boundary.  */
	      int missed = pagesize - ((intptr_t) ptr & (pagesize - 1));
	      _gst_heap_sbrk (current_heap, -size + missed);
	      ptr = _gst_heap_sbrk (current_heap, size);
            }

          if (ptr != NULL)
	    return (ptr);
	}

      /* The data segment we're using might bang against an mmap-ed
	 area (the sbrk segment for example cannot grow more than
	 960M on Linux).  We try using a new mmap-ed area, but be
	 careful not to loop!  */
      if (just_allocated_heap)
	return (NULL);

      just_allocated_heap = _gst_heap_create (NULL, MMAP_AREA_SIZE);
      if (!just_allocated_heap)
	return (NULL);

      current_heap = just_allocated_heap;
    }
}


char *
xstrdup (const char *str)
{
  int length = strlen (str) + 1;
  char *newstr = (char *) xmalloc (length);
  memcpy (newstr, str, length);
  return (newstr);
}

PTR
xmalloc (size_t n)
{
  PTR block;

  block = malloc (n);
  if (!block && n)
    nomemory (1);

  return (block);
}

PTR
xcalloc (size_t n, size_t s)
{
  PTR block;

  block = calloc (n, s);
  if (!block && n && s)
    nomemory (1);

  return (block);
}

PTR
xrealloc (PTR p, size_t n)
{
  PTR block;

  block = realloc (p, n);
  if (!block && n)
    nomemory (1);

  return (block);
}

void
xfree (PTR p)
{
  if (p)
    free (p);
}

void
nomemory (int fatal)
{
  fputs ("\n\n[Memory allocation failure]"
	 "\nCan't allocate enough memory to continue.\n",
	 stderr);

  if (fatal)
    exit (1);
}