/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#include "gc_inline.h" /* for GC_malloc_kind */

#include <stdio.h>
#include <string.h>

/* Allocate a reclaim list for the given kind. */
/* Return TRUE on success. */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) {
      DCL_LOCK_STATE;
      UNLOCK(); /* just to unset GC_lock_holder */
      GC_init();
      LOCK();
    }
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
      h = GC_allochblk(lb, k, flags);
      retry = TRUE;
    }
    if (h == 0) {
      result = 0;
    } else {
      size_t total_bytes = n_blocks * HBLKSIZE;

      if (n_blocks > 1) {
        GC_large_allocd_bytes += total_bytes;
        if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
          GC_max_large_allocd_bytes = GC_large_allocd_bytes;
      }
      /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
      result = h -> hb_body;
    }
    return result;
}

/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags);
    if (result != NULL
          && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Fill in additional entries in GC_size_map, including the i-th one. */
/* Note that a filled-in section of the array ending at n always */
/* has a length of at least n/4. */
STATIC void GC_extend_size_map(size_t i)
{
    size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
    size_t granule_sz;
    size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
                        /* The size we try to preserve. */
                        /* Close to i, unless this would */
                        /* introduce too many distinct sizes. */
    size_t smaller_than_i = byte_sz - (byte_sz >> 3);
    size_t low_limit; /* The lowest indexed entry we initialize. */
    size_t number_of_objs;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(0 == GC_size_map[i]);
    if (0 == GC_size_map[smaller_than_i]) {
      low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
      granule_sz = orig_granule_sz;
      while (GC_size_map[low_limit] != 0)
        low_limit++;
    } else {
      low_limit = smaller_than_i + 1;
      while (GC_size_map[low_limit] != 0)
        low_limit++;

      granule_sz = ROUNDED_UP_GRANULES(low_limit);
      granule_sz += granule_sz >> 3;
      if (granule_sz < orig_granule_sz)
        granule_sz = orig_granule_sz;
    }

    /* For these larger sizes, we use an even number of granules. */
    /* This makes it easier to, e.g., construct a 16-byte-aligned */
    /* allocator even if GRANULE_BYTES is 8. */
    granule_sz = (granule_sz + 1) & ~1;
    if (granule_sz > MAXOBJGRANULES)
      granule_sz = MAXOBJGRANULES;

    /* If we can fit the same number of larger objects in a block, do so. */
    number_of_objs = HBLK_GRANULES / granule_sz;
    GC_ASSERT(number_of_objs != 0);
    granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;

    byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
                        /* We may need one extra byte; do not always */
                        /* fill in GC_size_map[byte_sz]. */

    for (; low_limit <= byte_sz; low_limit++)
      GC_size_map[low_limit] = granule_sz;
}
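
/*
 * A purely illustrative walk-through of the rounding above, assuming a
 * configuration where GRANULE_BYTES is 16, EXTRA_BYTES is 0 and HBLKSIZE
 * is 4096 (none of which need hold for every build).  For i = 200:
 *
 *     ROUNDED_UP_GRANULES(200)  == 13     (13 * 16 == 208 bytes)
 *     (13 + 1) & ~1             == 14     (even granule count)
 *     HBLK_GRANULES / 14        == 18     (objects per 4 KB block)
 *
 * so GC_size_map entries up to GRANULES_TO_BYTES(14) == 224 bytes would
 * all be filled with 14, and requests in that range share one free list.
 */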

/* Allocate lb bytes for an object of kind k. */
/* Should not be used directly to allocate objects */
/* that require special handling on allocation. */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
            if (lg == 0) {
              if (!EXPECT(GC_is_initialized, TRUE)) {
                DCL_LOCK_STATE;
                UNLOCK(); /* just to unset GC_lock_holder */
                GC_init();
                LOCK();
                lg = GC_size_map[lb];
              }
              if (0 == lg) {
                GC_extend_size_map(lb);
                lg = GC_size_map[lb];
                GC_ASSERT(lg != 0);
              }
              /* Retry */
              opp = &(kind -> ok_freelist[lg]);
              op = *opp;
            }
            if (0 == op) {
                if (0 == kind -> ok_reclaim_list
                    && !GC_alloc_reclaim_list(kind))
                  return NULL;
                op = GC_allocobj(lg, k);
                if (0 == op)
                  return NULL;
            }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        if (op != NULL)
            GC_bytes_allocd += lb;
    }

    return op;
}

#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
    || !defined(GC_NO_FINALIZATION)
  /* Allocate a composite object of size lb bytes. The caller */
  /* guarantees that pointers past the first page are not relevant. */
  GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
  {
    word lb_adjusted;
    void * op;

    GC_ASSERT(I_HOLD_LOCK());
    if (lb <= HBLKSIZE)
        return GC_generic_malloc_inner(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    if (op != NULL)
        GC_bytes_allocd += lb_adjusted;
    return op;
  }
#endif

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force a GC at every malloc of size greater than or */
  /* equal to the given value. This might be handy during debugging. */
# if defined(CPPCHECK)
    size_t GC_dbg_collect_at_malloc_min_lb = 16*1024; /* e.g. */
# else
    size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif
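
/*
 * Build-configuration sketch (illustrative only): the threshold above is
 * supplied at compile time by defining the macro to a byte count, e.g.
 *
 *     cc -DGC_COLLECT_AT_MALLOC=4096 ...
 *
 * which forces a collection on every allocation of 4 KB or more.
 */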

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock. */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
          GC_bytes_allocd += lb_rounded;
        }
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void *op;
        void **opp;
        size_t lg;
        DCL_LOCK_STATE;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            if (k == PTRFREE) {
                *opp = obj_link(op);
            } else {
                GC_ASSERT(0 == obj_link(op)
                          || ((word)obj_link(op)
                                <= (word)GC_greatest_plausible_heap_addr
                              && (word)obj_link(op)
                                >= (word)GC_least_plausible_heap_addr));
                *opp = obj_link(op);
                obj_link(op) = 0;
            }
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
            return op;
        }
        UNLOCK();
    }

    /* We make the GC_clear_stack() call a tail one, hoping to get more */
    /* of the stack. */
    return GC_clear_stack(GC_generic_malloc(lb, k));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t lb, int k)
  {
    return GC_malloc_kind_global(lb, k);
  }
#endif

/* Allocate lb bytes of atomic (pointer-free) data. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    return GC_malloc_kind(lb, PTRFREE);
}

/* Allocate lb bytes of composite (pointerful) data. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
{
    return GC_malloc_kind(lb, NORMAL);
}
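
/*
 * Illustrative client-side sketch (not part of the collector): the entry
 * points above are normally reached through the public gc.h wrappers,
 * which expand to these functions (or to the debug variants when GC_DEBUG
 * is defined):
 *
 *     #include "gc.h"
 *     ...
 *     GC_INIT();                                   // once, at startup
 *     int **v = GC_MALLOC(10 * sizeof(int *));     // may hold pointers
 *     char *s = GC_MALLOC_ATOMIC(64);              // pointer-free data
 */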

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
                                                        size_t lb, int k)
{
    void *op;
    DCL_LOCK_STATE;

    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void **opp;
        size_t lg;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be */
                /* collected anyway. */
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            /* Mark bit was already set on free list. It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing. */
            GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = GC_generic_malloc(lb, k);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
    } else {
      hdr * hhdr;

      op = GC_generic_malloc(lb, k);
      if (NULL == op)
        return NULL;

      GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
      hhdr = HDR(op);
      /* We don't need the lock here, since we have an undisguised */
      /* pointer. We do need to hold the lock while we adjust */
      /* mark bits. */
      LOCK();
      set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#     ifndef THREADS
        GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case */
                /* because the counter could be updated before locking. */
#     endif
      hhdr -> hb_n_marks = 1;
      UNLOCK();
    }
    return op;
}

/* Allocate lb bytes of pointerful, traced, but not collectible data. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}
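
/*
 * Illustrative client sketch (not part of the collector): uncollectable
 * objects are never reclaimed automatically; they are still traced, so
 * pointers stored in them keep their targets alive, and they must be
 * released explicitly:
 *
 *     struct node **tbl = GC_MALLOC_UNCOLLECTABLE(128 * sizeof *tbl);
 *     ...
 *     GC_FREE(tbl);    // required; the collector will not reclaim it
 */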

#ifdef GC_ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectible data. */
  /* This is normally roughly equivalent to the system malloc. */
  /* But it may be useful if malloc is redefined. */
  GC_API GC_ATTR_MALLOC void * GC_CALL
        GC_malloc_atomic_uncollectable(size_t lb)
  {
    return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
  }
#endif /* GC_ATOMIC_UNCOLLECTABLE */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

# ifndef MSWINCE
#  include <errno.h>
# endif

  /* Avoid unnecessary nested procedure calls here, by #defining some */
  /* malloc replacements. Otherwise we end up saving a meaningless */
  /* return address in the object. It also speeds things up, but it is */
  /* admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

# if defined(CPPCHECK)
#   define REDIRECT_MALLOC_F GC_malloc /* e.g. */
# else
#   define REDIRECT_MALLOC_F REDIRECT_MALLOC
# endif
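
/*
 * Build-configuration sketch (illustrative only): malloc redirection is
 * enabled at compile time by defining REDIRECT_MALLOC to the desired
 * allocator, typically
 *
 *     cc -DREDIRECT_MALLOC=GC_malloc ...
 *
 * which makes the wrappers below forward malloc/calloc/strdup calls to
 * the collector.
 */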

  void * malloc(size_t lb)
  {
    /* It might help to manually inline the GC_malloc call here. */
    /* But any decent compiler should reduce the extra procedure call */
    /* to at most a jump instruction in this case. */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready for it. */
      /* It is not clear that this is enough to help matters. */
      /* The thread implementation may well call malloc at other */
      /* inopportune times. */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
#   endif
    return (void *)REDIRECT_MALLOC_F(lb);
  }

# if defined(GC_LINUX_THREADS)
    STATIC ptr_t GC_libpthread_start = 0;
    STATIC ptr_t GC_libpthread_end = 0;
    STATIC ptr_t GC_libld_start = 0;
    STATIC ptr_t GC_libld_end = 0;

    STATIC void GC_init_lib_bounds(void)
    {
      IF_CANCEL(int cancel_state;)

      if (GC_libpthread_start != 0) return;
      DISABLE_CANCEL(cancel_state);
      GC_init(); /* if not called yet */
      if (!GC_text_mapping("libpthread-",
                           &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread, */
        /* so we don't abort. Perhaps we should. */
        /* Generate message only once: */
        GC_libpthread_start = (ptr_t)1;
      }
      if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
      }
      RESTORE_CANCEL(cancel_state);
    }
# endif /* GC_LINUX_THREADS */

  void * calloc(size_t n, size_t lb)
  {
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
#   if defined(GC_LINUX_THREADS)
      /* libpthread allocated some memory that is only pointed to by */
      /* mmapped thread stacks. Make sure it is not collectible. */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread. */
        if (!EXPECT(lib_bounds_set, TRUE)) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end))
          return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
          /* The two ranges are actually usually adjacent, so there may */
          /* be a way to speed this up. */
      }
#   endif
    return (void *)REDIRECT_MALLOC_F(n * lb);
  }
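
/*
 * A note on the overflow guard in calloc above: if both n and lb are at
 * most GC_SQRT_SIZE_MAX, their product cannot exceed GC_SIZE_MAX, so the
 * cheap (lb | n) test skips the division in the common case; the exact
 * n > GC_SIZE_MAX / lb check runs only when one operand is large.
 * Illustrative example (assuming a 64-bit size_t):
 *
 *     calloc((size_t)1 << 33, (size_t)1 << 33);
 *         // product would overflow; the out-of-memory handler is
 *         // invoked with GC_SIZE_MAX instead
 */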

# ifndef strdup
    char *strdup(const char *s)
    {
      size_t lb = strlen(s) + 1;
      char *result = (char *)REDIRECT_MALLOC_F(lb);
      if (result == 0) {
        errno = ENOMEM;
        return 0;
      }
      BCOPY(s, result, lb);
      return result;
    }
# endif /* !defined(strdup) */
  /* If strdup is macro-defined, we assume that it actually calls malloc, */
  /* and thus the right thing will happen even without overriding it. */
  /* This seems to be true on most Linux systems. */

# ifndef strndup
    /* This is similar to strdup(). */
    char *strndup(const char *str, size_t size)
    {
      char *copy;
      size_t len = strlen(str);
      if (len > size)
        len = size;
      copy = (char *)REDIRECT_MALLOC_F(len + 1);
      if (copy == NULL) {
        errno = ENOMEM;
        return NULL;
      }
      if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
      copy[len] = '\0';
      return copy;
    }
# endif /* !strndup */

# undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* in bytes */
    size_t ngranules; /* sz in granules */
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI. It's not my fault ... */
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_free(%p) after GC #%lu\n",
                    p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
         || defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
      /* This might be called indirectly by GC_print_callers to free */
      /* the result of backtrace_symbols. */
      /* For Solaris, we have to redirect malloc calls during */
      /* initialization. For the others, this seems to happen */
      /* implicitly. */
      /* Don't try to deallocate that memory. */
      if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        void **flh;

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
            /* It's unnecessary to clear the mark bit. If the object */
            /* is reallocated, it doesn't matter. Otherwise, the */
            /* collector will do it, since it's on a free list. */
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
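
/*
 * Illustrative client sketch (not part of the collector): explicit
 * deallocation is optional for collectible objects but mandatory for
 * uncollectable ones, and p must be the base pointer returned by the
 * allocator (see the GC_base assertion above):
 *
 *     char *buf = GC_MALLOC_ATOMIC(256);
 *     ...
 *     GC_FREE(buf);   // eager reclamation; otherwise the collector
 *                     // would reclaim it once it becomes unreachable
 */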

/* Explicitly deallocate an object p when we already hold the lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
  GC_INNER void GC_free_inner(void * p)
  {
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* bytes */
    size_t ngranules; /* sz in granules */
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        void ** flh;

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
  }
#endif /* THREADS */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif

#if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)

# if defined(CPPCHECK)
#   define REDIRECT_FREE_F GC_free /* e.g. */
# else
#   define REDIRECT_FREE_F REDIRECT_FREE
# endif

  void free(void * p)
  {
#   ifndef IGNORE_FREE
#     if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
        /* Don't bother with initialization checks. If nothing has */
        /* been initialized, the check fails, and that is safe, since */
        /* we have not allocated any uncollectible objects either. */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread. */
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end)) {
          GC_free(p);
          return;
        }
#     endif
      REDIRECT_FREE_F(p);
#   endif
  }
#endif /* REDIRECT_FREE */