/*	$NetBSD: ralloc.c,v 1.2 2016/01/13 21:56:38 christos Exp $	*/

/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of the GNU C Library.  Its master source is NOT part of
the C library, however.  The master source lives in /gd/gnu/lib.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef	HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#include "getpagesize.h"

#else	/* Not emacs.  */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)

#endif	/* emacs.  */

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
				   & ~(MEM_ALIGN - 1))
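
/* A quick worked example of the macros above (illustrative only, not
   compiled; assumes page_size == 4096 and MEM_ALIGN == 8, hypothetical
   values for this sketch, and <assert.h>).  */
#if 0 /* example, not part of the allocator */
static void
example_rounding (void)
{
  /* ROUNDUP rounds up to the next page boundary:
     ROUNDUP (1) == 4096, ROUNDUP (4096) == 4096, ROUNDUP (4097) == 8192.  */
  assert (ROUNDUP (4097) == 8192);

  /* MEM_ROUNDUP rounds up to the next multiple of sizeof (double);
     ALIGNED tests page alignment.  */
  assert (MEM_ROUNDUP (9) == 16);
  assert (ALIGNED (8192) && ! ALIGNED (8193));
}
#endif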

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
    POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
    POINTER address;
    SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
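
/* A worked instance of the sizing arithmetic above, with hypothetical
   numbers: if page_size == 4096 then extra_bytes == ROUNDUP (50000) ==
   53248; for size == 2000 and already_available == 1000, get starts as
   2000 + 53248 - 1000 == 54248 and, with a page-aligned last_heap->end,
   rounds up to 57344 (14 pages).  Small requests are thus batched so
   that real_morecore is called less often.  */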

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
    bloc_ptr bloc;
    heap_ptr heap;
    POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
    bloc_ptr bloc;
    SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

static int use_relocatable_buffers;
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* Move all blocs upward.  */
	  if (r_alloc_freeze_level > 0
	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}

      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    return 0;

  return *ptr;
}
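
/* A sketch of how a caller is expected to use the interface above
   (illustrative only, not compiled; `buffer' and `example_usage' are
   hypothetical names).  The caller registers the address of the
   variable that holds the data pointer, and must always access the
   data through that variable, since any allocation call may relocate
   blocs and rewrite it.  */
#if 0 /* example, not part of the allocator */
static POINTER buffer;

static void
example_usage (void)
{
  if (r_alloc (&buffer, 1000) == 0)
    abort ();			/* out of memory */

  ((char *) buffer)[0] = 'x';	/* always go through `buffer' */

  if (r_re_alloc (&buffer, 4000) == 0)
    abort ();			/* grow failed; buffer unchanged */

  r_alloc_free (&buffer);	/* also stores 0 in buffer */
}
#endif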

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (--r_alloc_freeze_level < 0)
    abort ();
}
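
/* Freeze/thaw calls nest; a sketch of the intended bracket pattern
   (illustrative only, not compiled; `example_freeze' is a hypothetical
   name).  While frozen, blocs hold still, so a raw pointer into
   relocatable data stays valid until the matching thaw.  */
#if 0 /* example, not part of the allocator */
static void
example_freeze (POINTER *ptr)
{
  char *p;

  r_alloc_freeze (0);		/* no extra non-relocatable room needed */
  p = (char *) *ptr;		/* safe: no relocation until the thaw */
  /* ... work with p ... */
  r_alloc_thaw ();
}
#endif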

/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
    int found = 0;
    heap_ptr h, ph = 0;
    bloc_ptr b, pb = 0;

    if (!r_alloc_initialized)
      return;

    assert (first_heap);
    assert (last_heap->end <= (POINTER) sbrk (0));
    assert ((POINTER) first_heap < first_heap->start);
    assert (first_heap->start <= virtual_break_value);
    assert (virtual_break_value <= first_heap->end);

    for (h = first_heap; h; h = h->next)
      {
	assert (h->prev == ph);
	assert ((POINTER) ROUNDUP (h->end) == h->end);
	assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
	assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
	assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

	if (ph)
	  {
	    assert (ph->end < h->start);
	    assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	  }

	if (h->bloc_start <= break_value && break_value <= h->end)
	    found = 1;

	ph = h;
      }

    assert (found);
    assert (last_heap == ph);

    for (b = first_bloc; b; b = b->next)
      {
	assert (b->prev == pb);
	assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
	assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

	ph = 0;
	for (h = first_heap; h; h = h->next)
	  {
	    if (h->bloc_start <= b->data && b->data + b->size <= h->end)
		break;
	    ph = h;
	  }

	assert (h);

	if (pb && pb->data + pb->size != b->data)
	  {
	    assert (ph && b->data == h->bloc_start);
	    while (ph)
	      {
		if (ph->bloc_start <= pb->data
		    && pb->data + pb->size <= ph->end)
		  {
		    assert (pb->data + pb->size + b->size > ph->end);
		    break;
		  }
		else
		  {
		    assert (ph->bloc_start + b->size > ph->end);
		  }
		ph = ph->prev;
	      }
	  }
	pb = b;
      }

    assert (last_bloc == pb);

    if (last_bloc)
	assert (last_bloc->data + last_bloc->size == break_value);
    else
	assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */