/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
		/* largest block we will allocate starting on a black   */
		/* listed block.  Must be >= HBLKSIZE.			*/
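		/* (E.g., 8192 bytes with the common HBLKSIZE of 4096;	*/
		/* the actual value is platform-dependent.)		*/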


# define UNIQUE_THRESHOLD 32
	/* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
	/* Sizes of at least this many heap blocks are mapped to a	*/
	/* single free list.						*/
# define FL_COMPRESSION 8
	/* In between sizes map this many distinct sizes to a single	*/
	/* bin.								*/

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
				 + UNIQUE_THRESHOLD
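	/* With the default thresholds: (256 - 32)/8 + 32 = 60, so	*/
	/* indices 0 through N_HBLK_FLS (60) are valid list slots.	*/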

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
	/* Number of free bytes on each list.	*/

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS 	*/
  /* > GC_max_large_allocd_bytes?					*/
# ifdef __GNUC__
  __inline__
# endif
  static GC_bool GC_enough_large_bytes_left(word bytes, int n)
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
	bytes += GC_free_bytes[i];
	if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
					+ UNIQUE_THRESHOLD;
}
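/* E.g., with the default thresholds: 10 blocks -> list 10;		*/
/* 40 blocks -> (40 - 32)/8 + 32 = list 33; 300 blocks -> list		*/
/* N_HBLK_FLS (60).							*/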

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf("Free list %lu:\n",
		              (unsigned long)i);
#     else
        if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
		              (unsigned long)i,
			      (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
    	GC_printf("\t%p size %lu ", h, (unsigned long)sz);
    	total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf("partially black listed\n");
        } else {
             GC_printf("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
#   ifndef USE_MUNMAP
      if (total_free != GC_large_free_bytes) {
	GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
		  (unsigned long) GC_large_free_bytes);
      }
#   endif
    GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.				 */
int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
	if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
	start = GC_heap_sects[i].hs_start;
	bytes = GC_heap_sects[i].hs_bytes;
	end = start + bytes;
	/* Merge in contiguous sections.	*/
	  while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
	    ++i;
	    end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
	  }
	GC_printf("***Section from %p to %p\n", start, end);
	for (p = start; p < end;) {
	    hhdr = HDR(p);
	    GC_printf("\t%p ", p);
	    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
		GC_printf("Missing header!!(%p)\n", (void *)hhdr);
		p += HBLKSIZE;
		continue;
	    }
	    if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
					divHBLKSZ(hhdr -> hb_sz));
	        int actual_index;

		GC_printf("\tfree block of size 0x%lx bytes",
			  (unsigned long)(hhdr -> hb_sz));
	 	if (IS_MAPPED(hhdr)) {
		    GC_printf("\n");
		} else {
		    GC_printf(" (unmapped)\n");
		}
		actual_index = free_list_index_of(hhdr);
		if (-1 == actual_index) {
		    GC_printf("\t\tBlock not on free list %d!!\n",
			      correct_index);
		} else if (correct_index != actual_index) {
		    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
			      actual_index, correct_index);
		}
		p += hhdr -> hb_sz;
	    } else {
		GC_printf("\tused for blocks of size 0x%lx bytes\n",
			  (unsigned long)(hhdr -> hb_sz));
		p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
	    }
	}
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and 	*/
/* kind of objects.							*/
/* Return FALSE on failure.						*/
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
			    int kind, unsigned flags)
{
    word descr;
    size_t granules;

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
     /* Set hb_inv_sz as portably as possible. 				*/
     /* We set it to the smallest value such that sz * inv_sz >= 2**32.	*/
     /* This may be more precision than necessary.			*/
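     /* E.g., on a 64-bit target with byte_sz = 24:			*/
     /*   inv_sz = floor(2**32/24) + 1 = 178956971, and			*/
     /*   24 * 178956971 = 4294967304 >= 2**32.				*/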
      if (byte_sz > MAXOBJBYTES) {
	 hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
	word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
	  if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#	else  /* 32 bit words */
	  GC_ASSERT(byte_sz >= 4);
	  inv_sz = ((unsigned)1 << 31)/byte_sz;
	  inv_sz *= 2;
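	  /* The loop below relies on unsigned wraparound: inv_sz is	*/
	  /* advanced until the 32-bit product inv_sz*byte_sz wraps	*/
	  /* past 2**32 and thus becomes <= byte_sz.			*/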
	  while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#	endif
	hhdr -> hb_inv_sz = inv_sz;
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size-appropriate
 * free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hdr *hhdr, int n)
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.	*/
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
	  index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#	ifdef USE_MUNMAP
	  if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
	  } else {
	    index = n;
	  }
#	endif
	GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
	GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
	hdr *phdr;
	GET_HDR(hhdr -> hb_prev, phdr);
	phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
	hdr * nhdr;
	GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
	GET_HDR(hhdr -> hb_next, nhdr);
	nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

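    /* If p lies inside a larger block, its header slot holds a		*/
    /* forwarding count rather than a real header; the loop below	*/
    /* follows it back to the start of the enclosing block.		*/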
    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
	p = FORWARDED_ADDR(p,phdr);
	phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
	    return p;
	} else {
	    return 0;
	}
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
	return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * Blocks are inserted at the head of the list; each list is doubly linked.
 */
void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only	*/
/* way blocks are ever unmapped.					*/
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6
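    /* A block is unmapped only if it has not been reclaimed within	*/
    /* the last UNMAP_THRESHOLD collections.  The double comparison	*/
    /* below allows for wraparound of the 16-bit hb_last_reclaimed	*/
    /* stamp relative to GC_gc_no.					*/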

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
	if (!IS_MAPPED(hhdr)) continue;
	threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
	last_rec = hhdr -> hb_last_reclaimed;
	if ((last_rec > GC_gc_no || last_rec < threshold)
	    && threshold < GC_gc_no /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
	  GC_unmap((ptr_t)h, sz);
	  hhdr -> hb_flags |= WAS_UNMAPPED;
    	}
      }
    }
}

/* Merge all unmapped blocks that are adjacent to other free		*/
/* blocks.  This may involve remapping, since all blocks are either	*/
/* fully mapped or fully unmapped.					*/
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
	GET_HDR(h, hhdr);
	size = hhdr->hb_sz;
	next = (struct hblk *)((word)h + size);
	GET_HDR(next, nexthdr);
	/* Coalesce with successor, if possible */
	  if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
	    nextsize = nexthdr -> hb_sz;
	    if (IS_MAPPED(hhdr)) {
	      GC_ASSERT(!IS_MAPPED(nexthdr));
	      /* make both consistent, so that we can merge */
	        if (size > nextsize) {
		  GC_remap((ptr_t)next, nextsize);
		} else {
		  GC_unmap((ptr_t)h, size);
		  hhdr -> hb_flags |= WAS_UNMAPPED;
		}
	    } else if (IS_MAPPED(nexthdr)) {
	      GC_ASSERT(!IS_MAPPED(hhdr));
	      if (size > nextsize) {
		GC_unmap((ptr_t)next, nextsize);
	      } else {
		GC_remap((ptr_t)h, size);
		hhdr -> hb_flags &= ~WAS_UNMAPPED;
		hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
	      }
	    } else {
	      /* Unmap any gap in the middle */
		GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
	    }
	    /* If they are both unmapped, we merge, but leave unmapped. */
	    GC_remove_from_fl(hhdr, i);
	    GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	    hhdr -> hb_sz += nexthdr -> hb_sz;
	    GC_remove_header(next);
	    GC_add_to_fl(h, hhdr);
	    /* Start over at beginning of list */
	    h = GC_hblkfreelist[i];
	  } else /* not mergeable with successor */ {
	    h = hhdr -> hb_next;
	  }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
			        size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
	/* FIXME: This is likely to be very bad news ... */
	WARN("Header allocation failed: Dropping block.\n", 0);
	return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
		    hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
	HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
	HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
	nhdr -> hb_flags &= ~FREE_BLK;
				/* Don't fail test for consecutive	*/
				/* free blocks in GC_add_to_fl.		*/
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}

struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n);

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz bytes, searching over the appropriate
 *   free lists in order of increasing size.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    int i;

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    start_list = GC_hblk_fl_from_blocks(blocks);
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
	struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
	if (0 != result) {
	    return result;
	}
    }
    return 0;
}
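
/* E.g. (illustrative): GC_allochblk(HBLKSIZE, PTRFREE, 0) requests a	*/
/* single block for pointer-free objects, starting the search at free	*/
/* list 1 and falling back to larger lists, splitting a bigger block	*/
/* if necessary.							*/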
/*
 * The same, but with search restricted to nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 */
struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
{
    struct hblk *hbp;
    hdr * hhdr;		/* Header corr. to hbp */
    			/* Initialized after loop if hbp !=0 	*/
    			/* Gcc uninitialized use warning is bogus.	*/
    struct hblk *thishbp;
    hdr * thishdr;		/* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;	/* bytes available in this block	*/

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
	hbp = GC_hblkfreelist[n];
	for (; 0 != hbp; hbp = hhdr -> hb_next) {
	    GET_HDR(hbp, hhdr);
	    size_avail = hhdr->hb_sz;
	    if (size_avail < size_needed) continue;
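	    /* An exact fit is always acceptable.  Otherwise, if a	*/
	    /* collection is due anyway, prefer triggering it over	*/
	    /* splitting a larger block, subject to the exceptions	*/
	    /* below.							*/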
	    if (size_avail != size_needed
		&& !GC_use_entire_heap
		&& !GC_dont_gc
		&& USED_HEAP_SIZE >= GC_requested_heapsize
		&& !TRUE_INCREMENTAL && GC_should_collect()) {
#		ifdef USE_MUNMAP
		    continue;
#		else
		    /* If we have enough large blocks left to cover any	*/
		    /* previous request for large blocks, we go ahead	*/
		    /* and split.  Assuming a steady state, that should	*/
		    /* be safe.  It means that we can use the full 	*/
		    /* heap if we allocate only small objects.		*/
		    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
		      continue;
		    }
		    /* If we are deallocating lots of memory from	*/
		    /* finalizers, fail and collect sooner rather	*/
		    /* than later.					*/
		    if (GC_finalizer_bytes_freed > (GC_heapsize >> 4))  {
		      continue;
		    }
#		endif /* !USE_MUNMAP */
	    }
	    /* If the next heap block is obviously better, go on.	*/
	    /* This prevents us from disassembling a single large block */
	    /* to get tiny blocks.					*/
	    {
	      signed_word next_size;

	      thishbp = hhdr -> hb_next;
	      if (thishbp != 0) {
		GET_HDR(thishbp, thishdr);
	        next_size = (signed_word)(thishdr -> hb_sz);
	        if (next_size < size_avail
	          && next_size >= size_needed
	          && !GC_is_black_listed(thishbp, (word)size_needed)) {
	          continue;
	        }
	      }
	    }
	    if ( !IS_UNCOLLECTABLE(kind) &&
	         (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
	      struct hblk * lasthbp = hbp;
	      ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
	      signed_word orig_avail = size_avail;
	      signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
	      					HBLKSIZE
	      					: size_needed);

	      while ((ptr_t)lasthbp <= search_end
	             && (thishbp = GC_is_black_listed(lasthbp,
	             				      (word)eff_size_needed))
		        != 0) {
	        lasthbp = thishbp;
	      }
	      size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
	      thishbp = lasthbp;
	      if (size_avail >= size_needed) {
	        if (thishbp != hbp &&
		    0 != (thishdr = GC_install_header(thishbp))) {
		  /* Make sure it's mapped before we mangle it. */
#		    ifdef USE_MUNMAP
		      if (!IS_MAPPED(hhdr)) {
		        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		        hhdr -> hb_flags &= ~WAS_UNMAPPED;
		      }
#		    endif
	          /* Split the block at thishbp */
		      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
		  /* Advance to thishbp */
		      hbp = thishbp;
		      hhdr = thishdr;
		      /* We must now allocate thishbp, since it may	*/
		      /* be on the wrong free list.			*/
		}
	      } else if (size_needed > (signed_word)BL_LIMIT
	                 && orig_avail - size_needed
			    > (signed_word)BL_LIMIT) {
	        /* Punt, since anything else risks unreasonable heap growth. */
		if (++GC_large_alloc_warn_suppressed
		    >= GC_large_alloc_warn_interval) {
	          WARN("Repeated allocation of very large block "
		       "(appr. size %ld):\n"
		       "\tMay lead to memory leak and poor performance.\n",
		       size_needed);
		  GC_large_alloc_warn_suppressed = 0;
		}
	        size_avail = orig_avail;
	      } else if (size_avail == 0 && size_needed == HBLKSIZE
			 && IS_MAPPED(hhdr)) {
		if (!GC_find_leak) {
	      	  static unsigned count = 0;

	      	  /* The block is completely blacklisted.  We need 	*/
	      	  /* to drop some such blocks, since otherwise we spend */
	      	  /* all our time traversing them if pointerfree	*/
	      	  /* blocks are unpopular.				*/
	          /* A dropped block will be reconsidered at next GC.	*/
	          if ((++count & 3) == 0) {
	            /* Allocate and drop the block in small chunks, to	*/
	            /* maximize the chance that we will recover some	*/
	            /* later.						*/
		      word total_size = hhdr -> hb_sz;
	              struct hblk * limit = hbp + divHBLKSZ(total_size);
	              struct hblk * h;
		      struct hblk * prev = hhdr -> hb_prev;

		      GC_large_free_bytes -= total_size;
		      GC_remove_from_fl(hhdr, n);
	              for (h = hbp; h < limit; h++) {
	                if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
	                  (void) setup_header(
	                	  hhdr, h,
	              		  HBLKSIZE,
	              		  PTRFREE, 0); /* Can't fail. */
	              	  if (GC_debugging_started) {
	              	    BZERO(h, HBLKSIZE);
	              	  }
	                }
	              }
	            /* Restore hbp to point at free block */
		      hbp = prev;
		      if (0 == hbp) {
			return GC_allochblk_nth(sz, kind, flags, n);
		      }
	   	      hhdr = HDR(hbp);
	          }
		}
	      }
	    }
	    if (size_avail >= size_needed) {
#		ifdef USE_MUNMAP
		  if (!IS_MAPPED(hhdr)) {
		    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		    hhdr -> hb_flags &= ~WAS_UNMAPPED;
		  }
#	        endif
		/* hbp may be on the wrong freelist; the parameter n	*/
		/* is important.					*/
		hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
		break;
	    }
	}

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
    	if (!GC_install_counts(hbp, (word)size_needed)) return(0);
    	/* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, hbp, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
    	GC_remove_protection(hbp, divHBLKSZ(size_needed),
			     (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of	*/
    /* consecutive failures.						*/
    {
	extern unsigned GC_fail_count;

	GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
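 *
 * E.g. (illustrative): freeing a block whose successor is already
 * free and mapped yields a single block whose size is the sum of the
 * two, placed on the free list for the combined size.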
 */
void
GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf("Duplicate large block deallocation of %p\n", hbp);
	ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
	GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	hhdr -> hb_sz += nexthdr -> hb_sz;
	GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
	prevhdr = HDR(prev);
	if (IS_MAPPED(prevhdr)) {
	  GC_remove_from_fl(prevhdr, FL_UNKNOWN);
	  prevhdr -> hb_sz += hhdr -> hb_sz;
#	  ifdef USE_MUNMAP
	    prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#	  endif
	  GC_remove_header(hbp);
	  hbp = prev;
	  hhdr = prevhdr;
	}
      }
    /* FIXME: It is not clear we really always want to do these merges	*/
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents	*/
    /* unmapping.							*/

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}