/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

/* extern ptr_t GC_clear_stack(); */	/* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();	/* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h	 */
/* or introducing dependencies on internal data structure layouts.	 */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif


void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
	case STUBBORN:
	    return(GC_malloc_stubborn((size_t)lb));
#     endif
	case PTRFREE:
	    return(GC_malloc_atomic((size_t)lb));
	case NORMAL:
	    return(GC_malloc((size_t)lb));
	case UNCOLLECTABLE:
	    return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
	  case AUNCOLLECTABLE:
	    return(GC_malloc_atomic_uncollectable((size_t)lb));
#	endif /* ATOMIC_UNCOLLECTABLE */
	default:
	    return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.	      */
/* Shrinking of large blocks is not implemented well.                 */
void * GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;	 /* Current size in bytes	*/
    size_t orig_sz;	 /* Original sz in bytes	*/
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
	/* Round it up to the next whole heap block */
	  register word descr;

	  sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
	  hhdr -> hb_sz = sz;
	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#	  ifdef MARK_BIT_PER_OBJ
	    GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#	  else
	    GC_ASSERT(hhdr -> hb_large_block &&
		      hhdr -> hb_map[ANY_INDEX] == 1);
#	  endif
	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
	  /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
	if (lb >= (sz >> 1)) {
#	    ifdef STUBBORN_ALLOC
	        if (obj_kind == STUBBORN) GC_change_stubborn(p);
#	    endif
	    if (orig_sz > lb) {
	      /* Clear unneeded part of object to avoid bogus pointer */
	      /* tracing.					      */
	      /* Safe for stubborn objects.			      */
	        BZERO(((ptr_t)p) + lb, orig_sz - lb);
	    }
	    return(p);
	} else {
	    /* shrink */
	      void * result =
	      		GC_generic_or_special_malloc((word)lb, obj_kind);

	      if (result == 0) return(0);
	          /* Could also return original object.  But this 	*/
	          /* gives the client warning of imminent disaster.	*/
	      BCOPY(p, result, lb);
#	      ifndef IGNORE_FREE
	        GC_free(p);
#	      endif
	      return(result);
	}
    } else {
	/* grow */
	  void * result =
	  	GC_generic_or_special_malloc((word)lb, obj_kind);

	  if (result == 0) return(0);
	  BCOPY(p, result, sz);
#	  ifndef IGNORE_FREE
	    GC_free(p);
#	  endif
	  return(result);
    }
}

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.	*/
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
	GC_debug_realloc(p, lb, RA "unknown", 0)

void * realloc(void * p, size_t lb)
  {
    return(REDIRECT_REALLOC(p, lb));
  }

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */


/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear memory.	*/
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lw;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    lb_rounded = WORDS_TO_BYTES(lw);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
	      /* Clear any memory that might be used for GC descriptors */
	      /* before we release the lock.			      */
	        ((word *)result)[0] = 0;
	        ((word *)result)[1] = 0;
	        ((word *)result)[lw-1] = 0;
	        ((word *)result)[lw-2] = 0;
#	    endif
        }
    }
    GC_bytes_allocd += lb_rounded;
    UNLOCK();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
    	if (init && !GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

void * GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

void * GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
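
/* Usage sketch (illustrative only, not part of the collector; the structure	*/
/* and function names below are invented for the example): the			*/
/* *_ignore_off_page allocators are appropriate for large objects when the	*/
/* client promises to keep a pointer to (or near) the beginning of the object	*/
/* reachable, so the collector need not track interior pointers into it.	*/
#if 0
#include "gc.h"

struct big_buffer {
    char data[1 << 20];		/* large, so interior pointers are costly to track */
};

static struct big_buffer *make_buffer(void)
{
    /* Only the returned base pointer will be stored by the caller.	*/
    return (struct big_buffer *)
		GC_malloc_ignore_off_page(sizeof(struct big_buffer));
}
#endif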

/* Increment GC_bytes_allocd from code that doesn't have direct access 	*/
/* to GC_arrays.							*/
void GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.				*/
void GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
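
/* Illustrative sketch only (not part of the collector; the function name and	*/
/* the granule computation are assumptions): the exported free-list pointers	*/
/* above, together with GC_incr_bytes_allocd(), are meant to let an inlined	*/
/* allocator pop objects directly off a free list while keeping the		*/
/* collector's allocation accounting correct.  This sketch assumes		*/
/* single-threaded use (no allocation lock is taken) and that granule_bytes	*/
/* matches the collector's GRANULE_BYTES.					*/
#if 0
#include "gc.h"

extern void ** const GC_objfreelist_ptr;	/* exported above */
extern void GC_incr_bytes_allocd(size_t);	/* defined above */

static void *inline_gc_malloc(size_t lb, size_t granule_bytes)
{
    size_t lg = (lb + granule_bytes - 1) / granule_bytes;  /* granules (rough) */
    void *op = GC_objfreelist_ptr[lg];

    if (op == 0) return GC_malloc(lb);		/* slow path: let the GC refill */
    GC_objfreelist_ptr[lg] = *(void **)op;	/* free-list link is the first word */
    *(void **)op = 0;
    GC_incr_bytes_allocd(lg * granule_bytes);	/* keep GC triggering heuristics honest */
    return op;
}
#endif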

#if defined(THREADS)

extern signed_word GC_bytes_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked	*/
/* through the first word in the object.  This has the advantage that	*/
/* it acquires the allocation lock only once, and may greatly reduce	*/
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would	*/
/* keep its own free list in thread-local storage, and call		*/
/* GC_malloc_many or friends to replenish it.  (We do not round up	*/
/* object sizes, since a call indicates the intention to consume many	*/
/* objects of exactly this size.)					*/
/* We assume that the size is a multiple of GRANULE_BYTES.		*/
/* We return the free-list by assigning it to *result, since it is	*/
/* not safe to return, e.g. a linked list of pointer-free objects,	*/
/* since the collector would not retain the entire list if it were 	*/
/* invoked just as we were returning.					*/
/* Note that the client should usually clear the link field.		*/
void GC_generic_malloc_many(size_t lb, int k, void **result)
{
void *op;
void *p;
void **opp;
size_t lw;	/* Length in words.	*/
size_t lg;	/* Length in granules.	*/
signed_word my_bytes_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

    GC_ASSERT((lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
	*result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
	GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.						   */
    {
	struct hblk ** rlh = ok -> ok_reclaim_list;
	struct hblk * hbp;
	hdr * hhdr;

	rlh += lg;
    	while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
	    GC_ASSERT(hhdr -> hb_sz == lb);
	    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#	    ifdef PARALLEL_MARK
		{
		  signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

		  GC_ASSERT(my_bytes_allocd_tmp >= 0);
		  /* We only decrement it while holding the GC lock.	*/
		  /* Thus we can't accidentally adjust it down in more	*/
		  /* than one thread simultaneously.			*/
		  if (my_bytes_allocd_tmp != 0) {
		    (void)AO_fetch_and_add(
				(volatile AO_t *)(&GC_bytes_allocd_tmp),
				(AO_t)(-my_bytes_allocd_tmp));
		    GC_bytes_allocd += my_bytes_allocd_tmp;
		  }
		}
		GC_acquire_mark_lock();
		++ GC_fl_builder_count;
		UNLOCK();
		GC_release_mark_lock();
#	    endif
	    op = GC_reclaim_generic(hbp, hhdr, lb,
				    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
	      /* We also reclaimed memory, so we need to adjust that	*/
	      /* count.  Ideally this update would be atomic; since	*/
	      /* it is not, the result may be slightly inaccurate.	*/
	      GC_bytes_found += my_bytes_allocd;
#	      ifdef PARALLEL_MARK
		*result = op;
		(void)AO_fetch_and_add(
				(volatile AO_t *)(&GC_bytes_allocd_tmp),
				(AO_t)(my_bytes_allocd));
		GC_acquire_mark_lock();
		-- GC_fl_builder_count;
		if (GC_fl_builder_count == 0) GC_notify_all_builder();
		GC_release_mark_lock();
		(void) GC_clear_stack(0);
		return;
#	      else
	        GC_bytes_allocd += my_bytes_allocd;
	        goto out;
#	      endif
	    }
#	    ifdef PARALLEL_MARK
	      GC_acquire_mark_lock();
	      -- GC_fl_builder_count;
	      if (GC_fl_builder_count == 0) GC_notify_all_builder();
	      GC_release_mark_lock();
	      LOCK();
	      /* GC lock is needed for reclaim list access.  We must	*/
	      /* decrement fl_builder_count before reacquiring the GC	*/
	      /* lock.  Hopefully this path is rare.			*/
#	    endif
    	}
    }
    /* Next try to use prefix of global free list if there is one.	*/
    /* We don't refill it, but we need to use it up before allocating	*/
    /* a new block ourselves.						*/
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
      if ( (op = *opp) != 0 ) {
	*opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if (my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
	  }
        }
	GC_bytes_allocd += my_bytes_allocd;
	goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.	*/
    {
	struct hblk *h = GC_allochblk(lb, k, 0);
	if (h != 0) {
	  if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
	  GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#	  ifdef PARALLEL_MARK
	    GC_acquire_mark_lock();
	    ++ GC_fl_builder_count;
	    UNLOCK();
	    GC_release_mark_lock();
#	  endif

	  op = GC_build_fl(h, lw, ok -> ok_init, 0);
#	  ifdef PARALLEL_MARK
	    *result = op;
	    GC_acquire_mark_lock();
	    -- GC_fl_builder_count;
	    if (GC_fl_builder_count == 0) GC_notify_all_builder();
	    GC_release_mark_lock();
	    (void) GC_clear_stack(0);
	    return;
#	  else
	    goto out;
#	  endif
	}
    }

    /* As a last attempt, try allocating a single object.  Note that	*/
    /* this may trigger a collection or expand the heap.		*/
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

void * GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
			   & ~(GRANULE_BYTES-1)),
	    		   NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the	*/
/* links would not be seen by the collector.				*/
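
/* Usage sketch (illustrative only; the object size, thread-local storage	*/
/* mechanism, and function name are invented): as described above, a thread	*/
/* can keep a private free list in thread-local storage and refill it from	*/
/* GC_malloc_many() when it runs dry, paying for the allocation lock once per	*/
/* batch instead of once per object.						*/
#if 0
#include "gc.h"

#define NODE_BYTES 32			/* fixed object size for this cache */

static __thread void *my_free_list = 0;	/* assumes a compiler with __thread */

static void *fast_alloc_node(void)
{
    void *op = my_free_list;

    if (op == 0) {
	/* Replenish: GC_malloc_many returns one or more objects linked	*/
	/* through their first word.						*/
	op = GC_malloc_many(NODE_BYTES);
	if (op == 0) return 0;		/* out of memory */
    }
    my_free_list = *(void **)op;	/* unlink the head object */
    *(void **)op = 0;			/* client should clear the link field */
    return op;
}
#endif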
# endif

/* Allocate lb bytes of pointerful, traced, but not collectable data */
void * GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
	if (EXTRA_BYTES != 0 && lb != 0) lb--;
	    	  /* We don't need the extra byte, since this won't be	*/
	    	  /* collected anyway.					*/
	lg = GC_size_map[lb];
	opp = &(GC_uobjfreelist[lg]);
	LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be	*/
	    /* cleared only temporarily during a collection, as a 	*/
	    /* result of the normal free list mark bit clearing.	*/
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
	    /* For small objects, the free lists are completely marked. */
	}
	GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
	hdr * hhdr;

	op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

	GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
	hhdr = HDR((struct hblk *)op);
	/* We don't need the lock here, since we have an undisguised 	*/
	/* pointer.  We do need to hold the lock while we adjust	*/
	/* mark bits.							*/
	lb = hhdr -> hb_sz;
	LOCK();
	set_mark_bit_from_hdr(hhdr, 0);	/* Only object.	*/
	GC_ASSERT(hhdr -> hb_n_marks == 0);
	hhdr -> hb_n_marks = 1;
	UNLOCK();
	return((void *) op);
    }
}
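
/* Usage sketch (illustrative, not part of the collector; the structure and	*/
/* function names are invented): uncollectable objects are traced, so pointers	*/
/* stored in them keep collectable objects alive, but the uncollectable object	*/
/* itself must be released explicitly with GC_free().				*/
#if 0
#include "gc.h"

struct registry {
    void *entries[64];		/* pointers here are scanned by the collector */
};

static struct registry *make_registry(void)
{
    /* Acts much like a root: never reclaimed until GC_free() is called. */
    return (struct registry *)GC_malloc_uncollectable(sizeof(struct registry));
}

static void destroy_registry(struct registry *r)
{
    GC_free(r);			/* explicit deallocation is required */
}
#endif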

/* Not well tested nor integrated.	*/
/* Debug version is tricky and currently missing.	*/
#include <limits.h>

void * GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
	return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
	    /* Will be HBLKSIZE aligned.	*/
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.	   */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
	offset = align - offset;
        if (!GC_all_interior_pointers) {
	    if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
	    GC_register_displacement(offset);
	}
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
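
/* Usage sketch (illustrative; the particular size and alignment are just	*/
/* examples): GC_memalign returns collectable memory whose address is a	*/
/* multiple of the requested alignment.					*/
#if 0
#include <assert.h>
#include "gc.h"

static void *aligned_example(void)
{
    void *p = GC_memalign(64, 1000);	/* 1000 bytes, 64-byte aligned */

    assert(((GC_word)p & 63) == 0);	/* holds whenever allocation succeeds */
    return p;
}
#endif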

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data 	*/
/* This is normally roughly equivalent to the system malloc.		*/
/* But it may be useful if malloc is redefined.				*/
void * GC_malloc_atomic_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
	if (EXTRA_BYTES != 0 && lb != 0) lb--;
	    	  /* We don't need the extra byte, since this won't be	*/
	    	  /* collected anyway.					*/
	lg = GC_size_map[lb];
	opp = &(GC_auobjfreelist[lg]);
	LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
	    /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
	}
	GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
	hdr * hhdr;

	op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

	GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
	hhdr = HDR((struct hblk *)op);
	lb = hhdr -> hb_sz;

	LOCK();
	set_mark_bit_from_hdr(hhdr, 0);	/* Only object.	*/
	GC_ASSERT(hhdr -> hb_n_marks == 0);
	hhdr -> hb_n_marks = 1;
	UNLOCK();
	return((void *) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
