/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#include "gc_inline.h" /* for GC_malloc_kind */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c. They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables. We should probably break this up further.
 */

#include <stdio.h>
#include <string.h>

#ifdef MSWINCE
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
#else
# include <errno.h>
#endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
#include "gc_alloc_ptrs.h"
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef GC_ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
/* Return the kind of the object at p; if psize is not NULL, also store */
/* the object size recorded by the collector into *psize.               */
GC_API int GC_CALL GC_get_kind_and_size(const void * p, size_t * psize)
{
    hdr * hhdr = HDR(p);

    if (psize != NULL) {
        *psize = (size_t)hhdr->hb_sz;
    }
    return hhdr -> hb_obj_kind;
}

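/*
 * A minimal usage sketch (illustrative only, not part of the collector):
 * querying the kind and recorded size of an object. The 100-byte request
 * is an arbitrary example.
 *
 *   #include "gc.h"
 *
 *   void *obj = GC_MALLOC(100);
 *   size_t sz;
 *   int kind = GC_get_kind_and_size(obj, &sz);
 *   // sz is the size the collector recorded for obj (>= 100 after rounding)
 */
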
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                  int knd)
{
    switch(knd) {
        case PTRFREE:
        case NORMAL:
            return GC_malloc_kind(lb, knd);
        case UNCOLLECTABLE:
#       ifdef GC_ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
#       endif
            return GC_generic_malloc_uncollectable(lb, knd);
        default:
            return GC_generic_malloc(lb, knd);
    }
}

/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes. The object may be (and quite likely will be) moved.       */
/* The kind (e.g. atomic) is the same as that of the old object.       */
/* Shrinking of large blocks is not implemented well.                  */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    void * result;
    size_t sz;      /* Current size in bytes */
    size_t orig_sz; /* Original sz in bytes */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
    if (0 == lb) /* and p != NULL */ {
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
      return NULL;
    }
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = (size_t)hhdr->hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block. */
        word descr = GC_obj_kinds[obj_kind].ok_descriptor;

        sz = (sz + HBLKSIZE-1) & ~HBLKMASK;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr)
          descr += sz;
        /* GC_realloc might be changing the block size while            */
        /* GC_reclaim_block or GC_clear_hdr_marks is examining it.      */
        /* The change to the size field is benign, in that GC_reclaim   */
        /* (and GC_clear_hdr_marks) would work correctly with either    */
        /* value, since we are not changing the number of objects in    */
        /* the block. But seeing a half-updated value (though unlikely  */
        /* to occur in practice) could probably be bad.                 */
        /* Using unordered atomic accesses on the size and hb_descr     */
        /* fields would solve the issue. (The alternate solution might  */
        /* be to initially overallocate large objects, so we do not     */
        /* have to adjust the size in GC_realloc, if they still fit.    */
        /* But that is probably more expensive, since we may end up     */
        /* scanning a bunch of zeros during GC.)                        */
#       ifdef AO_HAVE_store
          GC_STATIC_ASSERT(sizeof(hhdr->hb_sz) == sizeof(AO_t));
          AO_store((volatile AO_t *)&hhdr->hb_sz, (AO_t)sz);
          AO_store((volatile AO_t *)&hhdr->hb_descr, (AO_t)descr);
#       else
          {
            DCL_LOCK_STATE;

            LOCK();
            hhdr -> hb_sz = sz;
            hhdr -> hb_descr = descr;
            UNLOCK();
          }
#       endif

#       ifdef MARK_BIT_PER_OBJ
          GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#       endif
#       ifdef MARK_BIT_PER_GRANULE
          GC_ASSERT((hhdr -> hb_flags & LARGE_BLOCK) != 0
                    && hhdr -> hb_map[ANY_INDEX] == 1);
#       endif
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        }
        /* shrink */
        sz = lb;
    }
    result = GC_generic_or_special_malloc((word)lb, obj_kind);
    if (result != NULL) {
      /* In case of shrink, it could also return the original object. */
      /* But this gives the client warning of imminent disaster.      */
      BCOPY(p, result, sz);
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
    }
    return result;
}
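
/*
 * A minimal usage sketch (illustrative only, not part of the collector):
 * growing a collectable buffer with the GC_REALLOC macro from gc.h (which
 * expands to GC_realloc in non-debug builds). The sizes are arbitrary.
 *
 *   #include "gc.h"
 *
 *   char *buf = (char *)GC_MALLOC(64);
 *   ...
 *   buf = (char *)GC_REALLOC(buf, 256);  // the object may be moved
 *   if (buf == NULL) abort();            // or any other out-of-memory handling
 *   // No explicit free is required; unreachable memory is reclaimed.
 */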

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here. */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_EXTRAS)

# if !defined(REDIRECT_MALLOC_IN_HEADER)
    void * realloc(void * p, size_t lb)
    {
      return(REDIRECT_REALLOC(p, lb));
    }
# endif

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the             */
/* beginning of the object are considered.                         */
/* We avoid holding the allocation lock while we clear the memory. */
GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return GC_generic_malloc(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (NULL == result) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb);
    }

    if (GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    } else {
#       ifdef THREADS
          /* Clear any memory that might be used for GC descriptors */
          /* before we release the lock.                            */
          ((word *)result)[0] = 0;
          ((word *)result)[1] = 0;
          ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
          ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#       endif
    }
    GC_bytes_allocd += lb_rounded;
    UNLOCK();
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return(result);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, PTRFREE);
}
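
/*
 * Illustrative sketch (not part of the collector): the ignore_off_page
 * variants are meant for large objects where the client keeps a pointer
 * to (or near) the beginning of the object, as described above. The
 * 8 MB size is an arbitrary example.
 *
 *   #include "gc.h"
 *
 *   // A large pointer-free buffer whose base pointer we always retain:
 *   char *big = (char *)GC_malloc_atomic_ignore_off_page(8UL << 20);
 */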

/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void)
{
    return (size_t)GC_bytes_freed;
}

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock. Instead of          */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock. (Atomically adding it doesn't      */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked      */
/* through the first word in the object. This has the advantage that     */
/* it acquires the allocation lock only once, and may greatly reduce     */
/* time wasted contending for the allocation lock. Typical usage would   */
/* be in a thread that requires many items of the same size. It would    */
/* keep its own free list in thread-local storage, and call              */
/* GC_malloc_many or friends to replenish it. (We do not round up        */
/* object sizes, since a call indicates the intention to consume many    */
/* objects of exactly this size.)                                        */
/* We assume that the size is a multiple of GRANULE_BYTES.               */
/* We return the free list by assigning it to *result rather than        */
/* returning it directly; returning, e.g., a linked list of pointer-free */
/* objects would be unsafe, since the collector would not retain the     */
/* entire list if it were invoked just as we were returning.             */
/* Note that the client should usually clear the link field.             */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.    */
    size_t lg;      /* Length in granules. */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    struct hblk ** rlh;
    DCL_LOCK_STATE;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    /* Currently a single object is always allocated if manual VDB.   */
    /* TODO: GC_dirty should be called for each linked object (except */
    /* the last one) to support allocation of multiple objects.       */
    if (!SMALL_OBJ(lb) || GC_manual_vdb) {
        op = GC_generic_malloc(lb, k);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
#       ifndef GC_DISABLE_INCREMENTAL
          if (GC_manual_vdb && GC_is_heap_ptr(result)) {
            GC_dirty_inner(result);
            REACHABLE_AFTER_DIRTY(op);
          }
#       endif
        return;
    }
    GC_ASSERT(k < MAXOBJKINDS);
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    rlh = ok -> ok_reclaim_list;
    if (rlh != NULL) {
        struct hblk * hbp;
        hdr * hhdr;

        while ((hbp = rlh[lg]) != NULL) {
            hhdr = HDR(hbp);
            rlh[lg] = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */

                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
#                 ifdef THREAD_SANITIZER
                    GC_release_mark_lock();
                    LOCK();
                    GC_bytes_found += my_bytes_allocd;
                    UNLOCK();
#                 else
                    GC_bytes_found += my_bytes_allocd;
                        /* The result may be inaccurate. */
                    GC_release_mark_lock();
#                 endif
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              /* We also reclaimed memory, so we need to adjust */
              /* that count.                                    */
              GC_bytes_found += my_bytes_allocd;
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* GC lock is needed for reclaim list access. We      */
                /* must decrement fl_builder_count before reacquiring */
                /* the lock. Hopefully this path is rare.             */

                rlh = ok -> ok_reclaim_list; /* reload rlh after locking */
                if (NULL == rlh) break;
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if ((word)my_bytes_allocd >= HBLKSIZE) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();

                op = GC_build_fl(h, lw,
                                 (ok -> ok_init || GC_debugging_started), 0);

                *result = op;
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
              }
#           endif
            op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
            goto out;
        }
    }

    /* As a last attempt, try allocating a single object. Note that */
    /* this may trigger a collection or expand the heap.            */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;

    /* Add EXTRA_BYTES and round up to a multiple of a granule. */
    lb = SIZET_SAT_ADD(lb, EXTRA_BYTES + GRANULE_BYTES - 1)
            & ~(GRANULE_BYTES - 1);

    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}
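
/*
 * A minimal usage sketch (illustrative only, not part of the collector):
 * a thread keeps a private free list and refills it with GC_malloc_many,
 * as described in the comment above GC_generic_malloc_many. The names
 * my_freelist and my_alloc are hypothetical, and real code would keep
 * my_freelist in thread-local storage.
 *
 *   #include "gc.h"
 *
 *   static void *my_freelist = NULL;
 *
 *   static void *my_alloc(void)   // hand out 32-byte collectable objects
 *   {
 *     void *p = my_freelist;
 *     if (p == NULL) {
 *       my_freelist = GC_malloc_many(32);  // list linked through first word
 *       p = my_freelist;
 *       if (p == NULL) return NULL;        // out of memory
 *     }
 *     my_freelist = GC_NEXT(p);  // unlink the head object
 *     GC_NEXT(p) = NULL;         // clear the link field, as advised above
 *     return p;
 *   }
 */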

#include <limits.h>

/* Debug version is tricky and currently missing. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
            return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align. That would be correct up to HBLKSIZE.       */
    new_lb = SIZET_SAT_ADD(lb, align - 1);
    result = (ptr_t)GC_malloc(new_lb);
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            GC_STATIC_ASSERT(VALID_OFFSET_SZ <= HBLKSIZE);
            GC_ASSERT(offset < VALID_OFFSET_SZ);
            GC_register_displacement(offset);
        }
    }
    result += offset;
    GC_ASSERT((word)result % align == 0);
    return result;
}

/* This one exists largely to redirect posix_memalign for leak detection. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
    /* Check alignment properly. */
    size_t align_minus_one = align - 1; /* to work around a cppcheck warning */
    if (align < sizeof(void *) || (align_minus_one & align) != 0) {
#     ifdef MSWINCE
        return ERROR_INVALID_PARAMETER;
#     else
        return EINVAL;
#     endif
    }

    if ((*memptr = GC_memalign(align, lb)) == NULL) {
#     ifdef MSWINCE
        return ERROR_NOT_ENOUGH_MEMORY;
#     else
        return ENOMEM;
#     endif
    }
    return 0;
}
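
/*
 * Illustrative sketch (not part of the collector): requesting an aligned,
 * collectable block. The 64-byte alignment and 1000-byte size are
 * arbitrary examples.
 *
 *   #include "gc.h"
 *
 *   void *p;
 *   if (GC_posix_memalign(&p, 64, 1000) != 0) {
 *     // EINVAL (bad alignment) or ENOMEM (allocation failed) was returned
 *   }
 *   // p is 64-byte aligned and reclaimed automatically once unreachable
 */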

/* Provide a version of strdup() that uses the collector to allocate the */
/* copy of the string.                                                   */
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
{
    char *copy;
    size_t lb;
    if (s == NULL) return NULL;
    lb = strlen(s) + 1;
    copy = (char *)GC_malloc_atomic(lb);
    if (NULL == copy) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(s, copy, lb);
    return copy;
}

GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
    char *copy;
    size_t len = strlen(str); /* str is expected to be non-NULL */
    if (len > size)
        len = size;
    copy = (char *)GC_malloc_atomic(len + 1);
    if (copy == NULL) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
    copy[len] = '\0';
    return copy;
}

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);

    if (copy == NULL) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */

GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
{
    return GC_malloc(lb);
}

GC_API void GC_CALL GC_change_stubborn(const void *p GC_ATTR_UNUSED)
{
    /* Empty. */
}

GC_API void GC_CALL GC_end_stubborn_change(const void *p)
{
    GC_dirty(p); /* entire object */
}

GC_API void GC_CALL GC_ptr_store_and_dirty(void *p, const void *q)
{
    *(const void **)p = q;
    GC_dirty(p);
    REACHABLE_AFTER_DIRTY(q);
}

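/*
 * Illustrative sketch (not part of the collector): GC_ptr_store_and_dirty
 * combines a pointer store into the heap with the dirty notification that
 * incremental / manual-VDB configurations rely on. The node type and
 * function below are hypothetical.
 *
 *   #include "gc.h"
 *
 *   struct node { struct node *next; int v; };
 *
 *   void push(struct node **head_in_heap, struct node *n)
 *   {
 *     // Equivalent to n->next = *head_in_heap followed by GC_dirty(&n->next).
 *     GC_ptr_store_and_dirty(&n->next, *head_in_heap);
 *     // Equivalent to *head_in_heap = n followed by GC_dirty(head_in_heap).
 *     GC_ptr_store_and_dirty(head_in_heap, n);
 *   }
 */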