/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();	/* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h	 */
/* or introducing dependencies on internal data structure layouts.	 */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
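
/* Illustrative sketch (not part of the collector): one way an inlined	*/
/* allocator might pop an object from the exported free list.  This	*/
/* assumes the caller holds the allocation lock (or the program is	*/
/* single-threaded), that lw is a size in words the free lists already	*/
/* use, and that objects are linked through their first word.  The	*/
/* supported way to do this is via the macros in gc_inline.h.		*/
#if 0
  static void * example_fl_pop(size_t lw)
  {
    void * op = GC_objfreelist_ptr[lw];		/* head of list for size lw	*/
    if (op != 0) {
	GC_objfreelist_ptr[lw] = *(void **)op;	/* unlink the head object	*/
	*(void **)op = 0;			/* clear the link word		*/
	GC_incr_words_allocd(lw);		/* keep allocation stats right	*/
    }
    return op;					/* 0 means the list was empty	*/
  }
#endif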


GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
	case STUBBORN:
	    return(GC_malloc_stubborn((size_t)lb));
#     endif
	case PTRFREE:
	    return(GC_malloc_atomic((size_t)lb));
	case NORMAL:
	    return(GC_malloc((size_t)lb));
	case UNCOLLECTABLE:
	    return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
	  case AUNCOLLECTABLE:
	    return(GC_malloc_atomic_uncollectable((size_t)lb));
#	endif /* ATOMIC_UNCOLLECTABLE */
	default:
	    return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.	      */
/* Shrinking of large blocks is not implemented well.                 */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;	 /* Current size in bytes	*/
register word orig_sz;	 /* Original sz in bytes	*/
int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
	/* Round it up to the next whole heap block */
	  register word descr;

	  sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
	  hhdr -> hb_sz = BYTES_TO_WORDS(sz);
	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
	  /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
	if (lb >= (sz >> 1)) {
#	    ifdef STUBBORN_ALLOC
	        if (obj_kind == STUBBORN) GC_change_stubborn(p);
#	    endif
	    if (orig_sz > lb) {
	      /* Clear unneeded part of object to avoid bogus pointer */
	      /* tracing.					      */
	      /* Safe for stubborn objects.			      */
	        BZERO(((ptr_t)p) + lb, orig_sz - lb);
	    }
	    return(p);
	} else {
	    /* shrink */
	      GC_PTR result =
	      		GC_generic_or_special_malloc((word)lb, obj_kind);

	      if (result == 0) return(0);
	          /* Could also return original object.  But this 	*/
	          /* gives the client warning of imminent disaster.	*/
	      BCOPY(p, result, lb);
#	      ifndef IGNORE_FREE
	        GC_free(p);
#	      endif
	      return(result);
	}
    } else {
	/* grow */
	  GC_PTR result =
	  	GC_generic_or_special_malloc((word)lb, obj_kind);

	  if (result == 0) return(0);
	  BCOPY(p, result, sz);
#	  ifndef IGNORE_FREE
	    GC_free(p);
#	  endif
	  return(result);
    }
}
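
/* Illustrative sketch (not part of the collector): GC_realloc keeps	*/
/* the kind of the original object, so a pointer-free (atomic) buffer	*/
/* stays pointer-free when resized.  The sizes below are arbitrary	*/
/* example values; the object may move, as documented above.		*/
#if 0
  {
    char * buf = (char *)GC_malloc_atomic(64);	/* pointer-free data		*/
    buf = (char *)GC_realloc(buf, 256);		/* still pointer-free; may move	*/
  }
#endif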

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
    return(REDIRECT_REALLOC(p, lb));
  }
# endif /* REDIRECT_REALLOC */


/* The same thing, except caller does not hold allocation lock.	*/
/* We avoid holding allocation lock while we clear memory.	*/
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
	      /* Clear any memory that might be used for GC descriptors */
	      /* before we release the lock.			      */
	        ((word *)result)[0] = 0;
	        ((word *)result)[1] = 0;
	        ((word *)result)[lw-1] = 0;
	        ((word *)result)[lw-2] = 0;
#	    endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
    	if (init && !GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
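
/* Illustrative sketch (not part of the collector): the _ignore_off_page	*/
/* variants are intended for large objects for which the client promises	*/
/* to keep a pointer to (or very near) the beginning of the object while	*/
/* it is live; see gc.h for the precise guarantee.  The size below is an	*/
/* arbitrary example value.							*/
#if 0
  {
    /* A large pointer-free buffer; only base is ever stored, so the	*/
    /* collector need not recognize pointers into its interior.	*/
    char * base = (char *)GC_malloc_atomic_ignore_off_page(1 << 20);
  }
#endif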

/* Increment GC_words_allocd from code that doesn't have direct access 	*/
/* to GC_arrays.							*/
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.				*/
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and 	*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.		*/
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
	if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
	    op = GC_clear_stack(GC_allocobj((word)lw, k));
	}
	if (op == 0) {
	    UNLOCK();
	    ENABLE_SIGNALS();
	    return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
	}
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* Analogous to the above, but assumes a small object size, and 	*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.		*/
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked	*/
/* through the first word in the object.  This has the advantage that	*/
/* it acquires the allocation lock only once, and may greatly reduce	*/
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would	*/
/* keep its own free list in thread-local storage, and call		*/
/* GC_malloc_many or friends to replenish it.  (We do not round up	*/
/* object sizes, since a call indicates the intention to consume many	*/
/* objects of exactly this size.)					*/
/* We return the free-list by assigning it to *result, since it is	*/
/* not safe to return, e.g. a linked list of pointer-free objects,	*/
/* since the collector would not retain the entire list if it were 	*/
/* invoked just as we were returning.					*/
/* Note that the client should usually clear the link field.		*/
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
	*result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
	GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.						   */
    {
	struct hblk ** rlh = ok -> ok_reclaim_list;
	struct hblk * hbp;
	hdr * hhdr;

	rlh += lw;
    	while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
	    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#	    ifdef PARALLEL_MARK
		{
		  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

		  GC_ASSERT(my_words_allocd_tmp >= 0);
		  /* We only decrement it while holding the GC lock.	*/
		  /* Thus we can't accidentally adjust it down in more	*/
		  /* than one thread simultaneously.			*/
		  if (my_words_allocd_tmp != 0) {
		    (void)GC_atomic_add(
				(volatile GC_word *)(&GC_words_allocd_tmp),
				(GC_word)(-my_words_allocd_tmp));
		    GC_words_allocd += my_words_allocd_tmp;
		  }
		}
		GC_acquire_mark_lock();
		++ GC_fl_builder_count;
		UNLOCK();
		ENABLE_SIGNALS();
		GC_release_mark_lock();
#	    endif
	    op = GC_reclaim_generic(hbp, hhdr, lw,
				    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#	      ifdef NEED_TO_COUNT
		/* We are neither gathering statistics, nor marking in	*/
		/* parallel.  Thus GC_reclaim_generic doesn't count	*/
		/* for us.						*/
    		for (p = op; p != 0; p = obj_link(p)) {
        	  my_words_allocd += lw;
		}
#	      endif
#	      if defined(GATHERSTATS)
	        /* We also reclaimed memory, so we need to adjust 	*/
	        /* that count.						*/
		/* This addition would have to be atomic to be exact;	*/
		/* since it is not, the results may be inaccurate.	*/
		GC_mem_found += my_words_allocd;
#	      endif
#	      ifdef PARALLEL_MARK
		*result = op;
		(void)GC_atomic_add(
				(volatile GC_word *)(&GC_words_allocd_tmp),
				(GC_word)(my_words_allocd));
		GC_acquire_mark_lock();
		-- GC_fl_builder_count;
		if (GC_fl_builder_count == 0) GC_notify_all_builder();
		GC_release_mark_lock();
		(void) GC_clear_stack(0);
		return;
#	      else
	        GC_words_allocd += my_words_allocd;
	        goto out;
#	      endif
	    }
#	    ifdef PARALLEL_MARK
	      GC_acquire_mark_lock();
	      -- GC_fl_builder_count;
	      if (GC_fl_builder_count == 0) GC_notify_all_builder();
	      GC_release_mark_lock();
	      DISABLE_SIGNALS();
	      LOCK();
	      /* GC lock is needed for reclaim list access.	We	*/
	      /* must decrement fl_builder_count before reacquiring GC	*/
	      /* lock.  Hopefully this path is rare.			*/
#	    endif
    	}
    }
    /* Next try to use prefix of global free list if there is one.	*/
    /* We don't refill it, but we need to use it up before allocating	*/
    /* a new block ourselves.						*/
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
	*opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
	  }
        }
	GC_words_allocd += my_words_allocd;
	goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.	*/
    {
	struct hblk *h = GC_allochblk(lw, k, 0);
	if (h != 0) {
	  if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
	  GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
			       - BYTES_TO_WORDS(HBLKSIZE) % lw;
#	  ifdef PARALLEL_MARK
	    GC_acquire_mark_lock();
	    ++ GC_fl_builder_count;
	    UNLOCK();
	    ENABLE_SIGNALS();
	    GC_release_mark_lock();
#	  endif

	  op = GC_build_fl(h, lw, ok -> ok_init, 0);
#	  ifdef PARALLEL_MARK
	    *result = op;
	    GC_acquire_mark_lock();
	    -- GC_fl_builder_count;
	    if (GC_fl_builder_count == 0) GC_notify_all_builder();
	    GC_release_mark_lock();
	    (void) GC_clear_stack(0);
	    return;
#	  else
	    goto out;
#	  endif
	}
    }

    /* As a last attempt, try allocating a single object.  Note that	*/
    /* this may trigger a collection or expand the heap.		*/
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the	*/
/* links would not be seen by the collector.				*/
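
/* Illustrative sketch (not part of the collector): replenishing a	*/
/* per-thread free list from GC_malloc_many, as described above.  The	*/
/* tls_free_list variable and the object size are hypothetical; links	*/
/* run through the first word of each object and should be cleared	*/
/* before an object is handed to the client.				*/
#if 0
  static GC_PTR tls_free_list = 0;	/* imagine this in thread-local storage	*/

  static GC_PTR example_alloc_16(void)
  {
    GC_PTR op;

    if (tls_free_list == 0) {
	tls_free_list = GC_malloc_many(16);	/* refill: 1 or more objects	*/
	if (tls_free_list == 0) return 0;	/* out of memory		*/
    }
    op = tls_free_list;
    tls_free_list = *(GC_PTR *)op;	/* advance to next object in list	*/
    *(GC_PTR *)op = 0;			/* clear the link field			*/
    return op;
  }
#endif
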
# endif

/* Allocate lb bytes of pointerful, traced, but not collectable data */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
	    	  /* We don't need the extra byte, since this won't be	*/
	    	  /* collected anyway.					*/
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_uobjfreelist[lw]);
	FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be	*/
	    /* cleared only temporarily during a collection, as a 	*/
	    /* result of the normal free list mark bit clearing.	*/
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
	op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised 	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
	register struct hblk * h;

	h = HBLKPTR(op);
	lw = HDR(h) -> hb_sz;

	DISABLE_SIGNALS();
	LOCK();
	GC_set_mark_bit(op);
	GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	UNLOCK();
	ENABLE_SIGNALS();
	return((GC_PTR) op);
    }
}
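
/* Illustrative sketch (not part of the collector): an uncollectable	*/
/* object is traced (so pointers stored in it keep their targets live)	*/
/* but is never reclaimed automatically; the client must GC_free it.	*/
/* The struct and sizes here are hypothetical example values.		*/
#if 0
  {
    struct example_root { GC_PTR data; };
    struct example_root * r = (struct example_root *)
		GC_malloc_uncollectable(sizeof(struct example_root));

    r -> data = GC_malloc(128);	/* kept live by the traced root object	*/
    /* ... */
    GC_free(r);			/* explicit deallocation is required	*/
  }
#endif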

#ifdef __STDC__
/* Not well tested nor integrated.	*/
/* Debug version is tricky and currently missing.	*/
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
	if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
	return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
	    /* Will be HBLKSIZE aligned.	*/
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.	   */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
	offset = align - offset;
        if (!GC_all_interior_pointers) {
	    if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
	    GC_register_displacement(offset);
	}
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
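
/* Illustrative sketch (not part of the collector): requesting storage	*/
/* with a stricter alignment than GC_malloc provides.  The alignment	*/
/* and size below are arbitrary example values; the result is still	*/
/* garbage collected.							*/
#if 0
  {
    void * p = GC_memalign(64, 1000);	/* 64-byte aligned, 1000 usable bytes	*/
    GC_ASSERT(((GC_word)p & 63) == 0);
  }
#endif
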
#endif

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data 	*/
/* This is normally roughly equivalent to the system malloc.		*/
/* But it may be useful if malloc is redefined.				*/
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
	    	  /* We don't need the extra byte, since this won't be	*/
	    	  /* collected anyway.					*/
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_auobjfreelist[lw]);
	FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.	*/
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
	    /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
	op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised 	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
	register struct hblk * h;

	h = HBLKPTR(op);
	lw = HDR(h) -> hb_sz;

	DISABLE_SIGNALS();
	LOCK();
	GC_set_mark_bit(op);
	GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	UNLOCK();
	ENABLE_SIGNALS();
	return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */