/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "private/gc_priv.h"

extern void * GC_clear_stack(void *);   /* in misc.c, behaves like identity */
void GC_extend_size_map(size_t);        /* in misc.c */

/* Allocate a reclaim list for the given kind.  */
/* Return TRUE on success.                      */
GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lb bytes.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* We hold the allocation lock.                 */
/* EXTRA_BYTES were already added to lb.        */
ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
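    /* For example, with GRANULE_BYTES == 16 (a typical value on   */
    /* 64-bit targets), lb == 20 rounds up to 32, while lb == 32   */
    /* is left unchanged.                                          */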
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
      }
#   endif
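    /* Retry while collecting or expanding the heap makes progress;   */
    /* GC_collect_or_expand returns FALSE once it can neither reclaim */
    /* nor obtain the required number of blocks.                      */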
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lb, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = h -> hb_body;
    }
    return result;
}


/* Allocate a large block of size lb bytes.  Clear if appropriate.  */
/* We hold the allocation lock.                                     */
/* EXTRA_BYTES were already added to lb.                            */
ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First, a version that assumes we already     */
/* hold the lock:                               */
void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if ((op = *opp) == 0) {
            if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
            }
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

  out:
    return op;
}

/* Allocate a composite object of size n bytes.  The caller guarantees  */
/* that pointers past the first page are not relevant.  Caller holds    */
/* the allocation lock.                                                 */
void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
    word lb_adjusted;
    void * op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;
    return op;
}
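
/* (IGNORE_OFF_PAGE tells the block allocator that the caller will     */
/* maintain a pointer to within the first page of the object, per the  */
/* guarantee stated in the comment above, so interior pointers beyond  */
/* that need not keep the object alive.)                               */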

void * GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lw;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lw = ROUNDED_UP_WORDS(lb);
        lb_rounded = WORDS_TO_BYTES(lw);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            } else {
#               ifdef THREADS
                  /* Clear any memory that might be used for GC descriptors */
                  /* before we release the lock.                            */
                  ((word *)result)[0] = 0;
                  ((word *)result)[1] = 0;
                  ((word *)result)[lw-1] = 0;
                  ((word *)result)[lw-2] = 0;
#               endif
            }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}


#define GENERAL_MALLOC(lb,k) \
    GC_clear_stack(GC_generic_malloc(lb, k))
/* We make the GC_clear_stack() call a tail call, hoping to get more of */
/* the stack.                                                           */

/* Allocate lb bytes of atomic (pointer-free) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc_atomic(size_t lb)
#else
  void * GC_malloc_atomic(size_t lb)
#endif
{
    void *op;
    void ** opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, 0)) {
            UNLOCK();
            return(GENERAL_MALLOC(lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC(lb, PTRFREE));
    }
}
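
/* Illustrative client usage (not part of this file): atomic objects   */
/* are never scanned for pointers, so they must hold pure data only,   */
/* e.g.                                                                 */
/*   double *v = (double *)GC_malloc_atomic(n * sizeof(double));        */
/* Storing the only reference to a collectable object in v could let    */
/* the collector reclaim that object while it is still in use.          */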

/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                              */
# ifdef __STDC__
  char *GC_strdup(const char *s)
# else
  char *GC_strdup(s)
  char *s;
# endif
{
    char *copy;

    if (s == NULL) return NULL;
    if ((copy = GC_malloc_atomic(strlen(s) + 1)) == NULL) {
        errno = ENOMEM;
        return NULL;
    }
    strcpy(copy, s);
    return copy;
}
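
/* Illustrative usage (not part of this file):                          */
/*   char *copy = GC_strdup("hello");                                   */
/* The copy is collectable, so no explicit deallocation is required;    */
/* on allocation failure GC_strdup returns NULL and sets errno to       */
/* ENOMEM, mirroring the C library behavior.                            */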

/* Allocate lb bytes of composite (pointerful) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc(size_t lb)
#else
  void * GC_malloc(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = (void **)&(GC_objfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, 0)) {
            UNLOCK();
            return(GENERAL_MALLOC(lb, NORMAL));
        }
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                         <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                         >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return op;
    } else {
        return(GENERAL_MALLOC(lb, NORMAL));
    }
}
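
/* Illustrative usage (not part of this file): NORMAL objects may      */
/* contain pointers, which the collector traces, e.g.                   */
/*   struct node { struct node *next; int val; };                       */
/*   struct node *nd = (struct node *)GC_malloc(sizeof(struct node));   */
/* Objects of a kind with ok_init set (as NORMAL is) are returned       */
/* cleared.                                                             */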

# ifdef REDIRECT_MALLOC

/* Avoid unnecessary nested procedure calls here, by #defining some    */
/* malloc replacements.  Otherwise we end up saving a meaningless      */
/* return address in the object.  It also speeds things up, but it is  */
/* admittedly quite ugly.                                               */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_malloc_replacement(lb) \
    GC_debug_malloc(lb, RA "unknown", 0)
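
/* For instance, with GC_ADD_CALLER defined and REDIRECT_MALLOC set    */
/* to GC_debug_malloc_replacement, malloc(lb) below expands to          */
/* GC_debug_malloc(lb, GC_RETURN_ADDR, "unknown", 0).                   */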

void * malloc(size_t lb)
{
    /* It might help to manually inline the GC_malloc call here.      */
    /* But any decent compiler should reduce the extra procedure call */
    /* to at most a jump instruction in this case.                    */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((void *)REDIRECT_MALLOC(lb));
}

#ifdef GC_LINUX_THREADS
  static ptr_t GC_libpthread_start = 0;
  static ptr_t GC_libpthread_end = 0;
  static ptr_t GC_libld_start = 0;
  static ptr_t GC_libld_end = 0;
  extern GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
                                                /* From os_dep.c */

  void GC_init_lib_bounds(void)
  {
    if (GC_libpthread_start != 0) return;
    if (!GC_text_mapping("/lib/tls/libpthread-",
                         &GC_libpthread_start, &GC_libpthread_end)
        && !GC_text_mapping("/lib/libpthread-",
                            &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread,     */
        /* so we don't abort.  Perhaps we should.                      */
        /* Generate the message only once:                             */
        GC_libpthread_start = (ptr_t)1;
    }
    if (!GC_text_mapping("/lib/ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
    }
  }
#endif /* GC_LINUX_THREADS */

void * calloc(size_t n, size_t lb)
{
    /* Minimal guard against n * lb overflowing size_t. */
    if (lb != 0 && n > ((size_t)-1) / lb) return NULL;
#   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      /* libpthread allocated some memory that is only pointed to by  */
      /* mmapped thread stacks.  Make sure it's not collectable.      */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread.   */
        if (!lib_bounds_set) {
            GC_init_lib_bounds();
            lib_bounds_set = TRUE;
        }
        if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
            || (caller >= GC_libld_start && caller < GC_libld_end))
          return GC_malloc_uncollectable(n*lb);
        /* The two ranges are actually usually adjacent, so there may */
        /* be a way to speed this up.                                 */
      }
#   endif
    return((void *)REDIRECT_MALLOC(n*lb));
}

#ifndef strdup
# include <string.h>
  char *strdup(const char *s)
  {
    size_t len = strlen(s) + 1;         /* including the terminating NUL */
    char * result = ((char *)REDIRECT_MALLOC(len));
    if (result == 0) {
        errno = ENOMEM;
        return 0;
    }
    BCOPY(s, result, len);
    return result;
  }
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it.     */
/* This seems to be true on most Linux systems.                         */

#undef GC_debug_malloc_replacement

# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p.   */
void GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* in bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    GC_ASSERT(GC_base(p) == p);
#   if defined(REDIRECT_MALLOC) && \
       (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
        || defined(MSWIN32))
        /* For Solaris, we have to redirect malloc calls during    */
        /* initialization.  For the others, this seems to happen   */
        /* implicitly.                                              */
        /* Don't try to deallocate that memory.                     */
        if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, 1)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the object is  */
        /* reallocated, it doesn't matter.  Otherwise the collector   */
        /* will do it, since it's on a free list.                     */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        GC_freehblk(h);
        UNLOCK();
    }
}
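
/* Illustrative usage (not part of this file): freeing is optional      */
/* under a collector, but GC_free recycles the object immediately:      */
/*   void *p = GC_malloc(64);                                           */
/*   ... use p ...                                                      */
/*   GC_free(p);   (p must be the base address of the object, per       */
/*                  the assertion above)                                */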

/* Explicitly deallocate an object p when we already hold the lock.    */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                          */
#ifdef THREADS
  void GC_free_inner(void * p)
  {
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* bytes */
    size_t ngranules;   /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        GC_freehblk(h);
    }
  }
#endif /* THREADS */

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
#   define REDIRECT_FREE GC_free
# endif
# ifdef REDIRECT_FREE
  void free(void * p)
  {
#   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      {
        /* Don't bother with initialization checks.  If nothing       */
        /* has been initialized, the check fails, and that's safe,    */
        /* since we haven't allocated uncollectable objects either.   */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since */
        /* the bounds will be set when/if we create another thread.   */
        if (caller >= GC_libpthread_start && caller < GC_libpthread_end) {
            GC_free(p);
            return;
        }
      }
#   endif
#   ifndef IGNORE_FREE
      REDIRECT_FREE(p);
#   endif
  }
# endif /* REDIRECT_FREE */
