1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
5 *
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 *
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
14 */
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
17 /* This must be compiled with the same flags used to build the */
18 /* GC. It uses GC internals to allow more precise results */
19 /* checking for some of the tests. */
20
21 # ifdef HAVE_CONFIG_H
22 # include "config.h"
23 # endif
24
25 # undef GC_BUILD
26
27 #if (defined(DBG_HDRS_ALL) || defined(MAKE_BACK_GRAPH)) && !defined(GC_DEBUG)
28 # define GC_DEBUG
29 #endif
30
31 #include "gc.h"
32
33 #ifndef NTHREADS /* Number of additional threads to fork. */
34 # define NTHREADS 5 /* excludes main thread, which also runs a test. */
35 /* Not respected by PCR test. */
36 #endif
37
38 # if defined(mips) && defined(SYSTYPE_BSD43)
39 /* MIPS RISCOS 4 */
40 # else
41 # include <stdlib.h>
42 # endif
43 # include <stdio.h>
44 # if defined(_WIN32_WCE) && !defined(__GNUC__)
45 # include <winbase.h>
46 /* # define assert ASSERT */
47 # else
48 # include <assert.h> /* Not normally used, but handy for debugging. */
49 # endif
50
51 # include "gc_typed.h"
52 # include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
53 /* some statistics and gcconfig.h. */
54
55 # if defined(MSWIN32) || defined(MSWINCE)
56 # include <windows.h>
57 # endif
58
59 #ifdef GC_PRINT_VERBOSE_STATS
60 # define print_stats VERBOSE
61 # define INIT_PRINT_STATS /* empty */
62 #else
63 /* Use own variable as GC_print_stats might not be exported. */
64 static int print_stats = 0;
65 # ifdef GC_READ_ENV_FILE
66 /* GETENV uses GC internal function in this case. */
67 # define INIT_PRINT_STATS /* empty */
68 # else
69 # define INIT_PRINT_STATS \
70 { \
71 if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) \
72 print_stats = VERBOSE; \
73 else if (0 != GETENV("GC_PRINT_STATS")) \
74 print_stats = 1; \
75 }
76 # endif
77 #endif /* !GC_PRINT_VERBOSE_STATS */
78
79 # ifdef PCR
80 # include "th/PCR_ThCrSec.h"
81 # include "th/PCR_Th.h"
82 # define GC_printf printf
83 # endif
84
85 # if defined(GC_PTHREADS)
86 # include <pthread.h>
87 # endif
88
89 # if (!defined(THREADS) || !defined(HANDLE_FORK) \
90 || (defined(DARWIN) && defined(MPROTECT_VDB) \
91 && !defined(NO_INCREMENTAL) && !defined(MAKE_BACK_GRAPH))) \
92 && !defined(NO_TEST_HANDLE_FORK) && !defined(TEST_HANDLE_FORK) \
93 && !defined(TEST_FORK_WITHOUT_ATFORK)
94 # define NO_TEST_HANDLE_FORK
95 # endif
96
97 # ifndef NO_TEST_HANDLE_FORK
98 # include <unistd.h>
99 # ifdef HANDLE_FORK
100 # define INIT_FORK_SUPPORT GC_set_handle_fork(1)
101 /* Causes abort in GC_init on pthread_atfork failure. */
102 # elif !defined(TEST_FORK_WITHOUT_ATFORK)
103 # define INIT_FORK_SUPPORT GC_set_handle_fork(-1)
104 /* Passing -1 implies fork() should be as well manually */
105 /* surrounded with GC_atfork_prepare/parent/child. */
106 # endif
107 # endif
108
109 # ifndef INIT_FORK_SUPPORT
110 # define INIT_FORK_SUPPORT /* empty */
111 # endif
112
113 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
114 static CRITICAL_SECTION incr_cs;
115 # endif
116
117 # include <stdarg.h>
118
/* Verify that the libgc we linked against matches the headers this   */
/* test was compiled with; a mismatch produces confusing failures.    */
/* (Renamed from the misspelled CHECH_GCLIB_VERSION.)                 */
#define CHECK_GCLIB_VERSION \
    if (GC_get_version() != ((GC_VERSION_MAJOR<<16) \
                            | (GC_VERSION_MINOR<<8) \
                            | GC_VERSION_MICRO)) { \
      GC_printf("libgc version mismatch\n"); \
      exit(1); \
    }

/* Call GC_INIT only on platforms on which we think we really need it, */
/* so that we can test automatic initialization on the rest. */
#if defined(CYGWIN32) || defined (AIX) || defined(DARWIN) \
        || defined(PLATFORM_ANDROID) || defined(THREAD_LOCAL_ALLOC) \
        || (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
# define GC_OPT_INIT GC_INIT()
#else
# define GC_OPT_INIT /* empty */
#endif

/* One-stop initialization used by the test drivers. */
#define GC_COND_INIT() \
    INIT_FORK_SUPPORT; GC_OPT_INIT; CHECK_GCLIB_VERSION; INIT_PRINT_STATS

/* Abort the test if an allocation returned NULL. */
#define CHECK_OUT_OF_MEMORY(p) \
    if ((p) == NULL) { \
      GC_printf("Out of memory\n"); \
      exit(1); \
    }
145
146 /* Allocation Statistics. Incremented without synchronization. */
147 /* FIXME: We should be using synchronization. */
148 int stubborn_count = 0;
149 int uncollectable_count = 0;
150 int collectable_count = 0;
151 int atomic_count = 0;
152 int realloc_count = 0;
153
154 #if defined(GC_AMIGA_FASTALLOC) && defined(AMIGA)
155
156 void GC_amiga_free_all_mem(void);
/* Fatal-error hook for the AMIGA fast-alloc build: release all        */
/* GC-managed Amiga memory before terminating via abort().             */
void Amiga_Fail(void)
{
    GC_amiga_free_all_mem();
    abort();
}
158 # define FAIL Amiga_Fail()
/* Typed-allocation wrapper for the AMIGA fast-alloc build: on an     */
/* initial failure force a collection and retry once before aborting. */
void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d)
{
    void *mem = GC_malloc_explicitly_typed(lb, d);

    if (NULL == mem) {
        GC_gcollect();
        mem = GC_malloc_explicitly_typed(lb, d);
        if (NULL == mem) {
            GC_printf("Out of memory, (typed allocations are not directly "
                      "supported with the GC_AMIGA_FASTALLOC option.)\n");
            FAIL;
        }
    }
    return mem;
}
/* Typed-calloc wrapper for the AMIGA fast-alloc build: on an initial */
/* failure force a collection and retry once before aborting.         */
void *GC_amiga_gctest_calloc_explicitly_typed(size_t a, size_t lb, GC_descr d)
{
    void *mem = GC_calloc_explicitly_typed(a, lb, d);

    if (NULL == mem) {
        GC_gcollect();
        mem = GC_calloc_explicitly_typed(a, lb, d);
        if (NULL == mem) {
            GC_printf("Out of memory, (typed allocations are not directly "
                      "supported with the GC_AMIGA_FASTALLOC option.)\n");
            FAIL;
        }
    }
    return mem;
}
185 # define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
186 # define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
187
188 #else /* !AMIGA_FASTALLOC */
189
190 # if defined(PCR) || defined(LINT2)
191 # define FAIL abort()
192 # else
193 # define FAIL ABORT("Test failed")
194 # endif
195
196 #endif /* !AMIGA_FASTALLOC */
197
198 /* AT_END may be defined to exercise the interior pointer test */
199 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
200 /* As it stands, this test should succeed with either */
201 /* configuration. In the FIND_LEAK configuration, it should */
202 /* find lots of leaks, since we free almost nothing. */
203
204 struct SEXPR {
205 struct SEXPR * sexpr_car;
206 struct SEXPR * sexpr_cdr;
207 };
208
209
210 typedef struct SEXPR * sexpr;
211
212 # define INT_TO_SEXPR(x) ((sexpr)(GC_word)(x))
213 # define SEXPR_TO_INT(x) ((int)(GC_word)(x))
214
215 # undef nil
216 # define nil (INT_TO_SEXPR(0))
217 # define car(x) ((x) -> sexpr_car)
218 # define cdr(x) ((x) -> sexpr_cdr)
219 # define is_nil(x) ((x) == nil)
220
221
222 int extra_count = 0; /* Amount of space wasted in cons node */
223
224 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
225 /* to test collector. */
226 # ifdef VERY_SMALL_CONFIG
227 # define cons small_cons
228 # else
/* Build a cons cell with GC_MALLOC_STUBBORN, padding it with          */
/* extra_count wasted bytes to vary object sizes across calls.         */
/* Verifies that the allocator returned zeroed memory, then fills the  */
/* padding with a recognizable pattern so clobbering can be detected.  */
sexpr cons (sexpr x, sexpr y)
{
    sexpr r;
    int *p;
    int my_extra = extra_count;   /* snapshot: extra_count is updated below */

    stubborn_count++;
    r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
    CHECK_OUT_OF_MEMORY(r);
    /* New objects must be zero-filled; any nonzero word means the     */
    /* allocator handed out dirty (possibly still-reachable) memory.   */
    for (p = (int *)r;
         (word)p < (word)r + my_extra + sizeof(struct SEXPR); p++) {
        if (*p) {
            GC_printf("Found nonzero at %p - allocator is broken\n",
                      (void *)p);
            FAIL;
        }
        /* Stamp each word with a marker plus its offset (low 12 bits). */
        *p = (int)((13 << 12) + ((p - (int *)r) & 0xfff));
    }
#   ifdef AT_END
        /* Point past the padding so only an interior pointer keeps    */
        /* the object alive (exercises ALL_INTERIOR_POINTERS).         */
        r = (sexpr)((char *)r + (my_extra & ~7));
#   endif
    r -> sexpr_car = x;
    r -> sexpr_cdr = y;
    /* Grow the per-call padding, wrapping back to 0 at 5000 bytes.    */
    my_extra++;
    if ( my_extra >= 5000 ) {
        extra_count = 0;
    } else {
        extra_count = my_extra;
    }
    GC_END_STUBBORN_CHANGE(r);
    return(r);
}
261 # endif
262
263 #ifdef GC_GCJ_SUPPORT
264
265 #include "gc_mark.h"
266 #include "gc_gcj.h"
267
268 /* The following struct emulates the vtable in gcj. */
269 /* This assumes the default value of MARK_DESCR_OFFSET. */
270 struct fake_vtable {
271 void * dummy; /* class pointer in real gcj. */
272 GC_word descr;
273 };
274
275 struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
276 + sizeof(struct fake_vtable *) };
277 /* length based descriptor. */
278 struct fake_vtable gcj_class_struct2 =
279 { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
280 /* Bitmap based descriptor. */
281
/* Mark procedure for the fake gcj objects (used with the bitmap       */
/* descriptor in gcj_class_struct2).  Pushes the car and cdr of the    */
/* SEXPR that follows the vtable pointer onto the GC mark stack.       */
/* env == 1 indicates the object came from the debug allocator, so     */
/* addr must first be converted from base to user pointer.             */
struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
                                        struct GC_ms_entry *mark_stack_ptr,
                                        struct GC_ms_entry *mark_stack_limit,
                                        word env )
{
    sexpr x;
    if (1 == env) {
        /* Object allocated with debug allocator. */
        addr = (word *)GC_USR_PTR_FROM_BASE(addr);
    }
    x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
    /* GC_MARK_AND_PUSH may grow the mark stack; always use its return. */
    mark_stack_ptr = GC_MARK_AND_PUSH(
                              (void *)(x -> sexpr_cdr), mark_stack_ptr,
                              mark_stack_limit, (void * *)&(x -> sexpr_cdr));
    mark_stack_ptr = GC_MARK_AND_PUSH(
                              (void *)(x -> sexpr_car), mark_stack_ptr,
                              mark_stack_limit, (void * *)&(x -> sexpr_car));
    return(mark_stack_ptr);
}
301
302 #endif /* GC_GCJ_SUPPORT */
303
304
/* Minimal cons: one exactly-sized, collectable SEXPR node. */
sexpr small_cons (sexpr x, sexpr y)
{
    sexpr node;

    collectable_count++;
    node = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
    CHECK_OUT_OF_MEMORY(node);
    node -> sexpr_car = x;
    node -> sexpr_cdr = y;
    return node;
}
316
/* Uncollectable cons whose cdr is stored bit-complemented, so the   */
/* collector cannot see it as a pointer (tests uncollectable lists). */
sexpr small_cons_uncollectable (sexpr x, sexpr y)
{
    sexpr node;

    uncollectable_count++;
    node = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
    CHECK_OUT_OF_MEMORY(node);
    node -> sexpr_car = x;
    node -> sexpr_cdr = (sexpr)(~(GC_word)y);   /* disguised pointer */
    return node;
}
328
329 #ifdef GC_GCJ_SUPPORT
330
331
/* Cons a node through the gcj-style allocator: room is reserved in  */
/* front of the SEXPR for the fake vtable pointer, and the bitmap    */
/* descriptor (gcj_class_struct2) drives marking.                    */
sexpr gcj_cons(sexpr x, sexpr y)
{
    GC_word *raw;
    sexpr node;

    raw = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
                                    + sizeof(struct fake_vtable*),
                                    &gcj_class_struct2);
    CHECK_OUT_OF_MEMORY(raw);
    node = (sexpr)(raw + 1);        /* user data starts after the vtable */
    node -> sexpr_car = x;
    node -> sexpr_cdr = y;
    return node;
}
346 #endif
347
348 /* Return reverse(x) concatenated with y */
/* Return reverse(x) concatenated with y (accumulator recursion). */
sexpr reverse1(sexpr x, sexpr y)
{
    return is_nil(x) ? y : reverse1(cdr(x), cons(car(x), y));
}
357
/* Reverse a list; optionally leaks a system-malloc block to mix   */
/* foreign allocations into the heap layout.                       */
sexpr reverse(sexpr x)
{
#   ifdef TEST_WITH_SYSTEM_MALLOC
      malloc(100000);
#   endif
    return reverse1(x, nil);
}
365
ints(int low,int up)366 sexpr ints(int low, int up)
367 {
368 if (low > up) {
369 return(nil);
370 } else {
371 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
372 }
373 }
374
375 #ifdef GC_GCJ_SUPPORT
376 /* Return reverse(x) concatenated with y */
/* Return reverse(x) concatenated with y, using gcj-style cells. */
sexpr gcj_reverse1(sexpr x, sexpr y)
{
    return is_nil(x) ? y : gcj_reverse1(cdr(x), gcj_cons(car(x), y));
}
385
/* Reverse a list built from gcj-style cells. */
sexpr gcj_reverse(sexpr x)
{
    return gcj_reverse1(x, nil);
}
390
gcj_ints(int low,int up)391 sexpr gcj_ints(int low, int up)
392 {
393 if (low > up) {
394 return(nil);
395 } else {
396 return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
397 }
398 }
399 #endif /* GC_GCJ_SUPPORT */
400
401 /* To check uncollectible allocation we build lists with disguised cdr */
402 /* pointers, and make sure they don't go away. */
uncollectable_ints(int low,int up)403 sexpr uncollectable_ints(int low, int up)
404 {
405 if (low > up) {
406 return(nil);
407 } else {
408 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
409 uncollectable_ints(low+1, up)));
410 }
411 }
412
/* Verify that list is exactly ((low) (low+1) ... (up)); abort the   */
/* test on any discrepancy (iterative walk down the spine).          */
void check_ints(sexpr list, int low, int up)
{
    for (;;) {
        if (is_nil(list)) {
            GC_printf("list is nil\n");
            FAIL;
        }
        if (SEXPR_TO_INT(car(car(list))) != low) {
            GC_printf(
               "List reversal produced incorrect list - collector is broken\n");
            FAIL;
        }
        if (low == up) {
            if (cdr(list) != nil) {
                GC_printf("List too long - collector is broken\n");
                FAIL;
            }
            return;
        }
        list = cdr(list);
        ++low;
    }
}
433
434 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(GC_word)(cdr(x)))
435
/* Verify an uncollectable list (disguised cdrs) still holds          */
/* low..up; iterative counterpart of check_ints.                      */
void check_uncollectable_ints(sexpr list, int low, int up)
{
    for (;;) {
        if (SEXPR_TO_INT(car(car(list))) != low) {
            GC_printf("Uncollectable list corrupted - collector is broken\n");
            FAIL;
        }
        if (low == up) {
            if (UNCOLLECTABLE_CDR(list) != nil) {
                GC_printf("Uncollectable list too long - collector is broken\n");
                FAIL;
            }
            return;
        }
        list = UNCOLLECTABLE_CDR(list);   /* undo the pointer disguise */
        ++low;
    }
}
451
452 /* Not used, but useful for debugging: */
/* Debug helper (unused in the normal run): print a boxed-int list   */
/* as "(1), (2), ..." or "NIL" for the empty list.                   */
void print_int_list(sexpr x)
{
    while (!is_nil(x)) {
        GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
        if (is_nil(cdr(x))) {
            GC_printf("\n");
            return;
        }
        GC_printf(", ");
        x = cdr(x);
    }
    GC_printf("NIL\n");
}
467
468 /* ditto: */
/* Debug helper: like print_int_list, but annotate every node with   */
/* its mark-bit state ([mkd:...] / [unm:...]).                       */
void check_marks_int_list(sexpr x)
{
    GC_printf(GC_is_marked(x) ? "[mkd:%p]" : "[unm:%p]", (void *)x);
    if (is_nil(x)) {
        GC_printf("NIL\n");
        return;
    }
    if (!GC_is_marked(car(x)))
        GC_printf("[unm car:%p]", (void *)car(x));
    GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
    if (is_nil(cdr(x))) {
        GC_printf("\n");
    } else {
        GC_printf(", ");
        check_marks_int_list(cdr(x));
    }
}
489
490 /*
491 * A tiny list reversal test to check thread creation.
492 */
493 #ifdef THREADS
494
495 # ifdef VERY_SMALL_CONFIG
496 # define TINY_REVERSE_UPPER_VALUE 4
497 # else
498 # define TINY_REVERSE_UPPER_VALUE 10
499 # endif
500
  /* Thread body for fork_a_thread: a few quick reverse/check cycles  */
  /* on a tiny list, to exercise allocation from a freshly created    */
  /* thread.  Native Win32 threads need the DWORD/__stdcall shape.    */
# if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
    DWORD __stdcall tiny_reverse_test(void * arg GC_ATTR_UNUSED)
# else
    void * tiny_reverse_test(void * arg GC_ATTR_UNUSED)
# endif
  {
    int i;
    for (i = 0; i < 5; ++i) {
      check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
                 1, TINY_REVERSE_UPPER_VALUE);
    }
    return 0;
  }
514
515 # if defined(GC_PTHREADS)
fork_a_thread(void)516 void fork_a_thread(void)
517 {
518 pthread_t t;
519 int code;
520 if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
521 GC_printf("Small thread creation failed %d\n", code);
522 FAIL;
523 }
524 if ((code = pthread_join(t, 0)) != 0) {
525 GC_printf("Small thread join failed %d\n", code);
526 FAIL;
527 }
528 }
529
530 # elif defined(GC_WIN32_THREADS)
fork_a_thread(void)531 void fork_a_thread(void)
532 {
533 DWORD thread_id;
534 HANDLE h;
535 h = GC_CreateThread((SECURITY_ATTRIBUTES *)NULL, (word)0,
536 tiny_reverse_test, NULL, (DWORD)0, &thread_id);
537 /* Explicitly specify types of the */
538 /* arguments to test the prototype. */
539 if (h == (HANDLE)NULL) {
540 GC_printf("Small thread creation failed %d\n",
541 (int)GetLastError());
542 FAIL;
543 }
544 if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
545 GC_printf("Small thread wait failed %d\n",
546 (int)GetLastError());
547 FAIL;
548 }
549 }
550
551 # endif
552
553 #endif
554
555 /* Try to force a to be strangely aligned */
556 struct {
557 char dummy;
558 sexpr aa;
559 } A;
560 #define a A.aa
561
562 /*
563 * Repeatedly reverse lists built out of very different sized cons cells.
564 * Check that we didn't lose anything.
565 */
/* Core of the list-reversal torture test.  Builds lists out of        */
/* several cons-cell flavors and sizes, repeatedly reverses them       */
/* (occasionally via realloc and from freshly forked threads), then    */
/* verifies nothing was lost or corrupted.  Invoked twice: first via   */
/* GC_do_blocking with data == 0 (GC-inactive frame), which re-enters  */
/* itself through GC_call_with_gc_active with data == 1.               */
void *GC_CALLBACK reverse_test_inner(void *data)
{
    int i;
    sexpr b;
    sexpr c;
    sexpr d;
    sexpr e;
    sexpr *f, *g, *h;

    if (data == 0) {
      /* This stack frame is not guaranteed to be scanned. */
      return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
    }

    /* BIG bounds the longest list; kept small on platforms with       */
    /* small default stacks since reverse1/check_ints recurse deeply.  */
# if /*defined(MSWIN32) ||*/ defined(MACOS)
    /* Win32S only allows 128K stacks */
#   define BIG 1000
# elif defined(PCR)
    /* PCR default stack is 100K.  Stack frames are up to 120 bytes. */
#   define BIG 700
# elif defined(MSWINCE) || defined(RTEMS)
    /* WinCE only allows 64K stacks */
#   define BIG 500
# elif defined(OSF1)
    /* OSF has limited stack space by default, and large frames. */
#   define BIG 200
# elif defined(__MACH__) && defined(__ppc64__)
#   define BIG 2500
# else
#   define BIG 4500
# endif

    A.dummy = 17;
    /* a is the strangely-aligned global (A.aa); the rest are locals. */
    a = ints(1, 49);
    b = ints(1, 50);
    c = ints(1, BIG);
    d = uncollectable_ints(1, 100);
    e = uncollectable_ints(1, 1);
    /* Check that realloc updates object descriptors correctly */
    collectable_count++;
    f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
    realloc_count++;
    f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(f);
    f[5] = ints(1,17);
    collectable_count++;
    g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
    realloc_count++;
    g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(g);
    g[799] = ints(1,18);
    collectable_count++;
    h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
    realloc_count++;
    h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(h);
# ifdef GC_GCJ_SUPPORT
    h[1999] = gcj_ints(1,200);
    for (i = 0; i < 51; ++i)
      h[1999] = gcj_reverse(h[1999]);
    /* Leave it as the reversed list for now. */
# else
    h[1999] = ints(1,200);
# endif
    /* Try to force some collections and reuse of small list elements */
    for (i = 0; i < 10; i++) {
      (void)ints(1, BIG);
    }
    /* Superficially test interior pointer recognition on stack */
    c = (sexpr)((char *)c + sizeof(char *));
    d = (sexpr)((char *)d + sizeof(char *));

    GC_FREE((void *)e);

    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 50; i++) {
      check_ints(b,1,50);
      b = reverse(reverse(b));
    }
    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 60; i++) {
#     if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
        if (i % 10 == 0) fork_a_thread();
#     endif
      /* This maintains the invariant that a always points to a list of */
      /* 49 integers.  Thus this is thread safe without locks,          */
      /* assuming atomic pointer assignments.                           */
      a = reverse(reverse(a));
#     if !defined(AT_END) && !defined(THREADS)
        /* This is not thread safe, since realloc explicitly deallocates */
        if (i & 1) {
          a = (sexpr)GC_REALLOC((void *)a, 500);
        } else {
          a = (sexpr)GC_REALLOC((void *)a, 8200);
        }
#     endif
    }
    check_ints(a,1,49);
    check_ints(b,1,50);

    /* Restore c and d values. */
    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));

    check_ints(c,1,BIG);
    check_uncollectable_ints(d, 1, 100);
    check_ints(f[5], 1,17);
    check_ints(g[799], 1,18);
# ifdef GC_GCJ_SUPPORT
    h[1999] = gcj_reverse(h[1999]);
# endif
    check_ints(h[1999], 1,200);
# ifndef THREADS
    a = 0;
# endif
    /* Volatile stores ensure the compiler cannot elide clearing these */
    /* roots before returning.                                         */
    *(sexpr volatile *)&b = 0;
    *(sexpr volatile *)&c = 0;
    return 0;
}
687
/* Entry point for the reversal test; runs reverse_test_inner through */
/* GC_do_blocking to also exercise GC_call_with_gc_active.            */
void reverse_test(void)
{
    /* Test GC_do_blocking/GC_call_with_gc_active. */
    (void)GC_do_blocking(reverse_test_inner, 0);
}
693
694 #undef a
695
696 /*
697 * The rest of this builds balanced binary trees, checks that they don't
698 * disappear, and tests finalization.
699 */
700 typedef struct treenode {
701 int level;
702 struct treenode * lchild;
703 struct treenode * rchild;
704 } tn;
705
706 int finalizable_count = 0;
707 int finalized_count = 0;
708 volatile int dropped_something = 0;
709
/* Finalizer registered on selected tree nodes by mktree.  Verifies    */
/* that the client data matches the node's level, bumps the global     */
/* finalized_count under the appropriate lock, and poisons the level   */
/* field so a duplicate finalization is caught immediately.            */
void GC_CALLBACK finalizer(void * obj, void * client_data)
{
    tn * t = (tn *)obj;

#   ifdef PCR
      PCR_ThCrSec_EnterSys();
#   endif
#   if defined(GC_PTHREADS)
      /* Function-local static mutex guards finalized_count updates. */
      static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&incr_lock);
#   elif defined(GC_WIN32_THREADS)
      EnterCriticalSection(&incr_cs);
#   endif
    /* client_data was set to the node's level at registration time. */
    if ((int)(GC_word)client_data != t -> level) {
      GC_printf("Wrong finalization data - collector is broken\n");
      FAIL;
    }
    finalized_count++;
    t -> level = -1;      /* detect duplicate finalization immediately */
#   ifdef PCR
      PCR_ThCrSec_ExitSys();
#   endif
#   if defined(GC_PTHREADS)
      pthread_mutex_unlock(&incr_lock);
#   elif defined(GC_WIN32_THREADS)
      LeaveCriticalSection(&incr_cs);
#   endif
}
738
739 size_t counter = 0;
740
741 # define MAX_FINALIZED (NTHREADS*4000)
742
743 # if !defined(MACOS)
744 GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
745 # ifndef GC_LONG_REFS_NOT_NEEDED
746 GC_FAR void *live_long_refs[MAX_FINALIZED] = { NULL };
747 # endif
748 #else
749 /* Too big for THINK_C. have to allocate it dynamically. */
750 GC_word *live_indicators = 0;
751 # ifndef GC_LONG_REFS_NOT_NEEDED
752 # define GC_LONG_REFS_NOT_NEEDED
753 # endif
754 #endif
755
756 int live_indicators_count = 0;
757
/* Build a complete binary tree of height n.  Every 17th node swaps    */
/* grandchildren (stressing pointer updates); every 119th node is      */
/* registered for finalization and used to exercise the disappearing-  */
/* link and long-link APIs (register / move / unregister round trips). */
tn * mktree(int n)
{
    /* Note: allocation happens before the n == 0 early return, so    */
    /* leaf calls still allocate (and count) one object.               */
    tn * result = (tn *)GC_MALLOC(sizeof(tn));

    collectable_count++;
#   if defined(MACOS)
      /* get around static data limitations. */
      if (!live_indicators) {
        live_indicators =
                  (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
        CHECK_OUT_OF_MEMORY(live_indicators);
      }
#   endif
    if (n == 0) return(0);
    CHECK_OUT_OF_MEMORY(result);
    result -> level = n;
    result -> lchild = mktree(n-1);
    result -> rchild = mktree(n-1);
    if (counter++ % 17 == 0 && n >= 2) {
      /* Swap the inner grandchildren to shuffle heap pointers. */
      tn * tmp;

      CHECK_OUT_OF_MEMORY(result->lchild);
      tmp = result -> lchild -> rchild;
      CHECK_OUT_OF_MEMORY(result->rchild);
      result -> lchild -> rchild = result -> rchild -> lchild;
      result -> rchild -> lchild = tmp;
    }
    if (counter++ % 119 == 0) {
#     ifndef GC_NO_FINALIZATION
        int my_index;
        void *new_link;
#     endif

      {
        /* Update the shared counters under the platform's lock. */
#       ifdef PCR
          PCR_ThCrSec_EnterSys();
#       endif
#       if defined(GC_PTHREADS)
          static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
          pthread_mutex_lock(&incr_lock);
#       elif defined(GC_WIN32_THREADS)
          EnterCriticalSection(&incr_cs);
#       endif
        /* Losing a count here causes erroneous report of failure. */
        finalizable_count++;
#       ifndef GC_NO_FINALIZATION
          my_index = live_indicators_count++;
#       endif
#       ifdef PCR
          PCR_ThCrSec_ExitSys();
#       endif
#       if defined(GC_PTHREADS)
          pthread_mutex_unlock(&incr_lock);
#       elif defined(GC_WIN32_THREADS)
          LeaveCriticalSection(&incr_cs);
#       endif
      }

#     ifndef GC_NO_FINALIZATION
        /* The node's level doubles as the finalizer's client data.   */
        GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
                              (GC_finalization_proc *)0, (void * *)0);
        if (my_index >= MAX_FINALIZED) {
          GC_printf("live_indicators overflowed\n");
          FAIL;
        }
        live_indicators[my_index] = 13;
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
            (void * *)(&(live_indicators[my_index])), result) != 0) {
          GC_printf("GC_general_register_disappearing_link failed\n");
          FAIL;
        }
        /* Moving a link onto itself must succeed and be a no-op. */
        if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
              (void **)(&(live_indicators[my_index]))) != GC_SUCCESS) {
          GC_printf("GC_move_disappearing_link(link,link) failed\n");
          FAIL;
        }
        new_link = (void *)live_indicators[my_index];
        if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
                                      &new_link) != GC_SUCCESS) {
          GC_printf("GC_move_disappearing_link(new_link) failed\n");
          FAIL;
        }
        if (GC_unregister_disappearing_link(&new_link) == 0) {
          GC_printf("GC_unregister_disappearing_link failed\n");
          FAIL;
        }
        /* After unregistering, moving from the old slot must report  */
        /* GC_NOT_FOUND.                                              */
        if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
                                      &new_link) != GC_NOT_FOUND) {
          GC_printf("GC_move_disappearing_link(new_link) failed 2\n");
          FAIL;
        }
        /* Re-register so the indicator is live for later checks. */
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
            (void * *)(&(live_indicators[my_index])), result) != 0) {
          GC_printf("GC_general_register_disappearing_link failed 2\n");
          FAIL;
        }
#       ifndef GC_LONG_REFS_NOT_NEEDED
          /* Same register/move/unregister round trip for long links. */
          if (GC_REGISTER_LONG_LINK(&live_long_refs[my_index], result) != 0) {
            GC_printf("GC_register_long_link failed\n");
            FAIL;
          }
          if (GC_move_long_link(&live_long_refs[my_index],
                                &live_long_refs[my_index]) != GC_SUCCESS) {
            GC_printf("GC_move_long_link(link,link) failed\n");
            FAIL;
          }
          new_link = live_long_refs[my_index];
          if (GC_move_long_link(&live_long_refs[my_index],
                                &new_link) != GC_SUCCESS) {
            GC_printf("GC_move_long_link(new_link) failed\n");
            FAIL;
          }
          if (GC_unregister_long_link(&new_link) == 0) {
            GC_printf("GC_unregister_long_link failed\n");
            FAIL;
          }
          if (GC_move_long_link(&live_long_refs[my_index],
                                &new_link) != GC_NOT_FOUND) {
            GC_printf("GC_move_long_link(new_link) failed 2\n");
            FAIL;
          }
          if (GC_REGISTER_LONG_LINK(&live_long_refs[my_index], result) != 0) {
            GC_printf("GC_register_long_link failed 2\n");
            FAIL;
          }
#       endif
#     endif
      /* Keep result live across the link manipulations above. */
      GC_reachable_here(result);
    }
    return(result);
}
889
/* Verify a tree built by mktree: every node at depth n must carry    */
/* level == n and leaves must be null.  Sprinkles extra allocations   */
/* along the walk to provoke collections mid-check.                   */
void chktree(tn *t, int n)
{
    if (n == 0) {
        if (t != 0) {
            GC_printf("Clobbered a leaf - collector is broken\n");
            FAIL;
        }
        return;
    }
    if (t -> level != n) {
        GC_printf("Lost a node at level %d - collector is broken\n", n);
        FAIL;
    }
    if (counter++ % 373 == 0) {
        collectable_count++;
        (void) GC_MALLOC(counter % 5001);
    }
    chktree(t -> lchild, n - 1);
    if (counter++ % 73 == 0) {
        collectable_count++;
        (void) GC_MALLOC(counter % 373);
    }
    chktree(t -> rchild, n - 1);
}
912
913
914 #if defined(GC_PTHREADS)
915 pthread_key_t fl_key;
916
/* Allocate an 8-byte collectable object.  In the fast path it pulls  */
/* from a per-thread free list refilled via GC_malloc_many; the head  */
/* pointer to that list is kept in pthread-specific data (fl_key) and */
/* is itself uncollectable so the list survives collections.          */
void * alloc8bytes(void)
{
# if defined(SMALL_CONFIG) || defined(GC_DEBUG)
    /* No GC_malloc_many in these configurations; allocate directly. */
    collectable_count++;
    return(GC_MALLOC(8));
# else
    void ** my_free_list_ptr;
    void * my_free_list;

    my_free_list_ptr = (void **)pthread_getspecific(fl_key);
    if (my_free_list_ptr == 0) {
        /* First call on this thread: create the free-list head slot. */
        uncollectable_count++;
        my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
        CHECK_OUT_OF_MEMORY(my_free_list_ptr);
        if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
            GC_printf("pthread_setspecific failed\n");
            FAIL;
        }
    }
    my_free_list = *my_free_list_ptr;
    if (my_free_list == 0) {
        /* Refill: GC_malloc_many returns a linked batch of objects. */
        my_free_list = GC_malloc_many(8);
        CHECK_OUT_OF_MEMORY(my_free_list);
    }
    /* Pop the head object and detach it from the list. */
    *my_free_list_ptr = GC_NEXT(my_free_list);
    GC_NEXT(my_free_list) = 0;
    collectable_count++;
    return(my_free_list);
# endif
}
947
948 #else
949 # define alloc8bytes() GC_MALLOC_ATOMIC(8)
950 #endif
951
alloc_small(int n)952 void alloc_small(int n)
953 {
954 int i;
955
956 for (i = 0; i < n; i += 8) {
957 atomic_count++;
958 if (alloc8bytes() == 0) {
959 GC_printf("Out of memory\n");
960 FAIL;
961 }
962 }
963 }
964
965 # if defined(THREADS) && defined(GC_DEBUG)
966 # ifdef VERY_SMALL_CONFIG
967 # define TREE_HEIGHT 12
968 # else
969 # define TREE_HEIGHT 15
970 # endif
971 # else
972 # ifdef VERY_SMALL_CONFIG
973 # define TREE_HEIGHT 13
974 # else
975 # define TREE_HEIGHT 16
976 # endif
977 # endif
tree_test(void)978 void tree_test(void)
979 {
980 tn * root;
981 int i;
982
983 root = mktree(TREE_HEIGHT);
984 # ifndef VERY_SMALL_CONFIG
985 alloc_small(5000000);
986 # endif
987 chktree(root, TREE_HEIGHT);
988 if (finalized_count && ! dropped_something) {
989 GC_printf("Premature finalization - collector is broken\n");
990 FAIL;
991 }
992 dropped_something = 1;
993 GC_noop1((word)root); /* Root needs to remain live until */
994 /* dropped_something is set. */
995 root = mktree(TREE_HEIGHT);
996 chktree(root, TREE_HEIGHT);
997 for (i = TREE_HEIGHT; i >= 0; i--) {
998 root = mktree(i);
999 chktree(root, i);
1000 }
1001 # ifndef VERY_SMALL_CONFIG
1002 alloc_small(5000000);
1003 # endif
1004 }
1005
1006 unsigned n_tests = 0;
1007
/* Bitmap for a large typed object: 312 pointer-bearing words (nine   */
/* full 32-bit words of ones plus 24 more bits), used to build the    */
/* d4 descriptor in typed_test below.                                 */
const GC_word bm_huge[10] = {
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0x00ffffff,
};
1020
1021 /* A very simple test of explicitly typed allocation */
/* A very simple test of explicitly typed allocation.  Builds a long  */
/* chain of typed objects (descriptors d1..d3 mark only word 1, the   */
/* back pointer, as a pointer), then walks the chain verifying the    */
/* tag word survived collection.                                      */
void typed_test(void)
{
    GC_word * old, * new;
    GC_word bm3 = 0x3;                  /* words 0-1 are pointers     */
    GC_word bm2 = 0x2;                  /* only word 1 is a pointer   */
    GC_word bm_large = 0xf7ff7fff;      /* 32-bit bitmap with gaps    */
    GC_descr d1 = GC_make_descriptor(&bm3, 2);
    GC_descr d2 = GC_make_descriptor(&bm2, 2);
    GC_descr d3 = GC_make_descriptor(&bm_large, 32);
    GC_descr d4 = GC_make_descriptor(bm_huge, 320);
    GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
    int i;

#   ifndef LINT
      /* Extra call just to exercise descriptor creation. */
      (void)GC_make_descriptor(&bm_large, 32);
#   endif
    collectable_count++;
    old = 0;
    for (i = 0; i < 4000; i++) {
      collectable_count++;
      new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
      CHECK_OUT_OF_MEMORY(new);
      /* Typed allocation must return zeroed memory. */
      if (0 != new[0] || 0 != new[1]) {
        GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
        FAIL;
      }
      /* Each link carries tag 17 in word 0 and the previous node in  */
      /* word 1 (the word every descriptor marks as a pointer).       */
      new[0] = 17;
      new[1] = (GC_word)old;
      old = new;
      collectable_count++;
      new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
      CHECK_OUT_OF_MEMORY(new);
      new[0] = 17;
      new[1] = (GC_word)old;
      old = new;
      collectable_count++;
      new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
      CHECK_OUT_OF_MEMORY(new);
      new[0] = 17;
      new[1] = (GC_word)old;
      old = new;
      collectable_count++;
      new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
                                                   d1);
      CHECK_OUT_OF_MEMORY(new);
      new[0] = 17;
      new[1] = (GC_word)old;
      old = new;
      collectable_count++;
      if (i & 0xff) {
        new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
                                                     d2);
      } else {
        /* Occasionally allocate a large typed array. */
        new = (GC_word *) GC_calloc_explicitly_typed(1001,
                                                     3 * sizeof(GC_word),
                                                     d2);
        if (new && (0 != new[0] || 0 != new[1])) {
          GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
          FAIL;
        }
      }
      CHECK_OUT_OF_MEMORY(new);
      new[0] = 17;
      new[1] = (GC_word)old;
      old = new;
    }
    /* Walk the chain from the newest node: every node must still     */
    /* carry the tag, i.e. the typed marking kept them all alive.     */
    for (i = 0; i < 20000; i++) {
      if (new[0] != 17) {
        GC_printf("typed alloc failed at %lu\n", (unsigned long)i);
        FAIL;
      }
      new[0] = 0;
      old = new;
      new = (GC_word *)(old[1]);
    }
    GC_gcollect();
    GC_noop1((word)x);      /* keep the huge d4 object live to here */
}
1100
1101 int fail_count = 0;
1102
fail_proc1(void * x GC_ATTR_UNUSED)1103 void GC_CALLBACK fail_proc1(void *x GC_ATTR_UNUSED)
1104 {
1105 fail_count++;
1106 }
1107
/* Check that a NULL-terminated varargs list of pointers contains no  */
/* duplicates; duplicates would mean some register/argument was not   */
/* scanned and an object was prematurely collected and reused.        */
/* NOTE(review): q has room for 100 pointers and the scan loop does   */
/* not bound-check; callers must pass fewer than 100 arguments.       */
static void uniq(void *p, ...) {
    va_list a;
    void *q[100];
    int n = 0, i, j;
    q[n++] = p;
    va_start(a,p);
    /* Collect arguments until the NULL terminator. */
    for (;(q[n] = va_arg(a,void *)) != NULL;n++) ;
    va_end(a);
    /* O(n^2) pairwise comparison - n is tiny here. */
    for (i=0; i<n; i++)
      for (j=0; j<i; j++)
        if (q[i] == q[j]) {
          GC_printf(
                "Apparently failed to mark from some function arguments.\n"
                "Perhaps GC_push_regs was configured incorrectly?\n"
          );
          FAIL;
        }
}
1126
1127 #ifdef THREADS
1128 # define TEST_FAIL_COUNT(n) 1
1129 #else
1130 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
1131 #endif
1132
inc_int_counter(void * pcounter)1133 void * GC_CALLBACK inc_int_counter(void *pcounter)
1134 {
1135 ++(*(int *)pcounter);
1136 return NULL;
1137 }
1138
/* Run one full round of collector sanity checks in the calling     */
/* thread: GC_size/GC_base/visibility queries, debug pointer-macro  */
/* checks, memalign, gcj setup, argument-marking, fork handling,    */
/* and the reverse/typed/tree workloads.  Finally increments        */
/* n_tests under the allocation lock.                               */
void run_one_test(void)
{
# ifndef DBG_HDRS_ALL
    char *x;
    char **z;
#   ifdef LINT
      char *y = 0;
#   else
      /* A function address cast to char*, used to exercise the     */
      /* GC_is_visible/GC_base checks on a non-heap pointer.        */
      char *y = (char *)(GC_word)fail_proc1;
#   endif
    CLOCK_TYPE typed_time;
# endif
  CLOCK_TYPE start_time;
  CLOCK_TYPE reverse_time;
  CLOCK_TYPE tree_time;
  unsigned long time_diff;

# ifdef FIND_LEAK
    GC_printf(
          "This test program is not designed for leak detection mode\n");
    GC_printf("Expect lots of problems\n");
# endif
  GC_FREE(0);           /* freeing NULL must be a no-op */
# ifdef THREADS
    if (!GC_thread_is_registered()) {
      GC_printf("Current thread is not registered with GC\n");
      FAIL;
    }
# endif
# ifndef DBG_HDRS_ALL
    collectable_count += 3;
    /* 7 bytes must round up to 8 (or to the minimum object size),  */
    /* and 15 bytes to 16.                                          */
    if ((GC_size(GC_malloc(7)) != 8 &&
         GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
        || GC_size(GC_malloc(15)) != 16) {
      GC_printf("GC_size produced unexpected results\n");
      FAIL;
    }
    collectable_count += 1;
    if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
      GC_printf("GC_malloc(0) failed: GC_size returns %lu\n",
                    (unsigned long)GC_size(GC_malloc(0)));
      FAIL;
    }
    collectable_count += 1;
    if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
      GC_printf("GC_malloc_uncollectable(0) failed\n");
      FAIL;
    }
    /* Route invalid-displacement / not-visible reports to          */
    /* fail_proc1 so they are counted rather than printed.          */
    GC_is_valid_displacement_print_proc = fail_proc1;
    GC_is_visible_print_proc = fail_proc1;
    collectable_count += 1;
    x = GC_malloc(16);
    if (GC_base(GC_PTR_ADD(x, 13)) != x) {
      GC_printf("GC_base(heap ptr) produced incorrect result\n");
      FAIL;
    }
    if (!GC_is_heap_ptr(x)) {
      GC_printf("GC_is_heap_ptr(heap_ptr) produced incorrect result\n");
      FAIL;
    }
    if (GC_is_heap_ptr(&x)) {
      GC_printf("GC_is_heap_ptr(&local_var) produced incorrect result\n");
      FAIL;
    }
    if (GC_is_heap_ptr(&fail_count) || GC_is_heap_ptr(NULL)) {
      GC_printf("GC_is_heap_ptr(&global_var) produced incorrect result\n");
      FAIL;
    }
    /* The debug increment/decrement macros must leave x with the   */
    /* same base address.                                           */
    (void)GC_PRE_INCR(x, 0);
    (void)GC_POST_INCR(x);
    (void)GC_POST_DECR(x);
    if (GC_base(x) != x) {
      GC_printf("Bad INCR/DECR result\n");
      FAIL;
    }
#   ifndef PCR
      if (GC_base(y) != 0) {
        GC_printf("GC_base(fn_ptr) produced incorrect result\n");
        FAIL;
      }
#   endif
    if (GC_same_obj(x+5, x) != x + 5) {
      GC_printf("GC_same_obj produced incorrect result\n");
      FAIL;
    }
    if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
      GC_printf("GC_is_visible produced incorrect result\n");
      FAIL;
    }
    z = GC_malloc(8);
    CHECK_OUT_OF_MEMORY(z);
    GC_PTR_STORE(z, x);
    if (*z != x) {
      GC_printf("GC_PTR_STORE failed: %p != %p\n", (void *)(*z), (void *)x);
      FAIL;
    }
    if (!TEST_FAIL_COUNT(1)) {
#     if!(defined(POWERPC) || defined(IA64)) || defined(M68K)
        /* On POWERPCs function pointers point to a descriptor in the */
        /* data segment, so there should have been no failures. */
        /* The same applies to IA64. Something similar seems to */
        /* be going on with NetBSD/M68K. */
        GC_printf("GC_is_visible produced wrong failure indication\n");
        FAIL;
#     endif
    }
    if (GC_is_valid_displacement(y) != y
        || GC_is_valid_displacement(x) != x
        || GC_is_valid_displacement(x + 3) != x + 3) {
      GC_printf("GC_is_valid_displacement produced incorrect result\n");
      FAIL;
    }
    {
      size_t i;

      (void)GC_malloc(17);
      /* GC_memalign results must honor the requested alignment and */
      /* start zero-filled.                                         */
      for (i = sizeof(GC_word); i < 512; i *= 2) {
        GC_word result = (GC_word) GC_memalign(i, 17);
        if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
      }
    }
#   ifndef ALL_INTERIOR_POINTERS
#     if defined(RS6000) || defined(POWERPC)
        if (!TEST_FAIL_COUNT(1))
#     else
        if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2))
#     endif
        {
          GC_printf(
              "GC_is_valid_displacement produced wrong failure indication\n");
          FAIL;
        }
#   endif
# endif /* DBG_HDRS_ALL */
  /* Test floating point alignment */
  collectable_count += 2;
  {
    double *dp = GC_MALLOC(sizeof(double));
    CHECK_OUT_OF_MEMORY(dp);
    *dp = 1.0;
    dp = GC_MALLOC(sizeof(double));
    CHECK_OUT_OF_MEMORY(dp);
    *dp = 1.0;
  }
  /* Test size 0 allocation a bit more */
  {
    size_t i;
    for (i = 0; i < 10000; ++i) {
      (void)GC_MALLOC(0);
      GC_FREE(GC_MALLOC(0));
      (void)GC_MALLOC_ATOMIC(0);
      GC_FREE(GC_MALLOC_ATOMIC(0));
    }
  }
# ifdef GC_GCJ_SUPPORT
    GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
    GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
# endif
  /* Make sure that fn arguments are visible to the collector. */
  uniq(
    GC_malloc(12), GC_malloc(12), GC_malloc(12),
    (GC_gcollect(),GC_malloc(12)),
    GC_malloc(12), GC_malloc(12), GC_malloc(12),
    (GC_gcollect(),GC_malloc(12)),
    GC_malloc(12), GC_malloc(12), GC_malloc(12),
    (GC_gcollect(),GC_malloc(12)),
    GC_malloc(12), GC_malloc(12), GC_malloc(12),
    (GC_gcollect(),GC_malloc(12)),
    GC_malloc(12), GC_malloc(12), GC_malloc(12),
    (GC_gcollect(),GC_malloc(12)),
    (void *)0);
  /* GC_malloc(0) must return NULL or something we can deallocate. */
  GC_free(GC_malloc(0));
  GC_free(GC_malloc_atomic(0));
  GC_free(GC_malloc(0));
  GC_free(GC_malloc_atomic(0));
# ifndef NO_TEST_HANDLE_FORK
    /* Check the collector survives fork() when the atfork handlers */
    /* are invoked explicitly; the child reruns a small workload.   */
    GC_atfork_prepare();
    if (fork() != 0) {
      GC_atfork_parent();
      if (print_stats)
        GC_log_printf("Forked child process (or failed)\n");
    } else {
      GC_atfork_child();
      if (print_stats)
        GC_log_printf("Started a child process\n");
#     ifdef THREADS
#       ifdef PARALLEL_MARK
          GC_gcollect(); /* no parallel markers */
#       endif
        GC_start_mark_threads();
#     endif
      GC_gcollect();
#     ifdef THREADS
        tiny_reverse_test(0);
        GC_gcollect();
#     endif
      if (print_stats)
        GC_log_printf("Finished a child process\n");
      exit(0);
    }
# endif
  /* Repeated list reversal test. */
  GET_TIME(start_time);
  reverse_test();
  if (print_stats) {
    GET_TIME(reverse_time);
    time_diff = MS_TIME_DIFF(reverse_time, start_time);
    GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
                  (unsigned) time_diff, (void *)&start_time);
  }
# ifndef DBG_HDRS_ALL
    typed_test();
    if (print_stats) {
      GET_TIME(typed_time);
      time_diff = MS_TIME_DIFF(typed_time, start_time);
      GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
                    (unsigned) time_diff, (void *)&start_time);
    }
# endif /* DBG_HDRS_ALL */
  tree_test();
  if (print_stats) {
    GET_TIME(tree_time);
    time_diff = MS_TIME_DIFF(tree_time, start_time);
    GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
                  (unsigned) time_diff, (void *)&start_time);
  }
  /* Run reverse_test a second time, so we hopefully notice corruption. */
  reverse_test();
  if (print_stats) {
    GET_TIME(reverse_time);
    time_diff = MS_TIME_DIFF(reverse_time, start_time);
    GC_log_printf(
        "-------------Finished second reverse_test at time %u (%p)\n",
        (unsigned)time_diff, (void *)&start_time);
  }
  /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
  /* AO_fetch_and_add1() may be unavailable to update a counter. */
  (void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
  if (print_stats)
    GC_log_printf("Finished %p\n", (void *)&start_time);
}
1381
/* Round v up to the nearest multiple of bound. */
#define NUMBER_ROUND_UP(v, bound) ((((v) + (bound) - 1) / (bound)) * (bound))
1383
/* Drive the collector until all unreachable objects have been      */
/* finalized, print allocation/finalization statistics, and fail    */
/* if the total allocation volume is too small (missed work) or     */
/* the final heap exceeds an empirically chosen upper bound.        */
void check_heap_stats(void)
{
    size_t max_heap_sz;         /* empirical upper bound on heap size */
    int i;
#   ifndef GC_NO_FINALIZATION
      int still_live;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        int still_long_live = 0;
#     endif
#     ifdef FINALIZE_ON_DEMAND
        int late_finalize_count = 0;
#     endif
#   endif

#   ifdef VERY_SMALL_CONFIG
    /* The upper bounds are a guess, which has been empirically */
    /* adjusted. On low end uniprocessors with incremental GC */
    /* these may be particularly dubious, since empirically the */
    /* heap tends to grow largely as a result of the GC not */
    /* getting enough cycles. */
#     if CPP_WORDSZ == 64
        max_heap_sz = 4500000;
#     else
        max_heap_sz = 2800000;
#     endif
#   else
#     if CPP_WORDSZ == 64
        max_heap_sz = 23000000;
#     else
        max_heap_sz = 16000000;
#     endif
#   endif
#   ifdef GC_DEBUG
        /* Debug headers and saved call chains inflate object size. */
        max_heap_sz *= 2;
#       ifdef SAVE_CALL_CHAIN
            max_heap_sz *= 3;
#           ifdef SAVE_CALL_COUNT
                max_heap_sz += max_heap_sz * SAVE_CALL_COUNT/4;
#           endif
#       endif
#   endif
    /* Each concurrently running test contributes its own share. */
    max_heap_sz *= n_tests;
#   if defined(USE_MMAP) || defined(MSWIN32)
      max_heap_sz = NUMBER_ROUND_UP(max_heap_sz, 4 * 1024 * 1024);
#   endif
    /* Garbage collect repeatedly so that all inaccessible objects */
    /* can be finalized. */
      while (GC_collect_a_little()) { }
      for (i = 0; i < 16; i++) {
        GC_gcollect();
#       ifndef GC_NO_FINALIZATION
#         ifdef FINALIZE_ON_DEMAND
            late_finalize_count +=
#         endif
                GC_invoke_finalizers();
#       endif
      }
      if (print_stats) {
        struct GC_stack_base sb;
        int res = GC_get_stack_base(&sb);

        if (res == GC_SUCCESS) {
          GC_log_printf("Primordial thread stack bottom: %p\n", sb.mem_base);
        } else if (res == GC_UNIMPLEMENTED) {
          GC_log_printf("GC_get_stack_base() unimplemented\n");
        } else {
          GC_printf("GC_get_stack_base() failed: %d\n", res);
          FAIL;
        }
      }
    GC_printf("Completed %u tests\n", n_tests);
    GC_printf("Allocated %d collectable objects\n", collectable_count);
    GC_printf("Allocated %d uncollectable objects\n",
                  uncollectable_count);
    GC_printf("Allocated %d atomic objects\n", atomic_count);
    GC_printf("Allocated %d stubborn objects\n", stubborn_count);
    GC_printf("Finalized %d/%d objects - ",
                  finalized_count, finalizable_count);
#   ifndef GC_NO_FINALIZATION
#     ifdef FINALIZE_ON_DEMAND
        /* On-demand mode: every finalizer must have run through the */
        /* explicit GC_invoke_finalizers() calls above.              */
        if (finalized_count != late_finalize_count) {
          GC_printf("Demand finalization error\n");
          FAIL;
        }
#     endif
      /* At least half of the finalizable objects should have been  */
      /* finalized by now, and never more than were registered.     */
      if (finalized_count > finalizable_count
          || finalized_count < finalizable_count/2) {
        GC_printf("finalization is probably broken\n");
        FAIL;
      } else {
        GC_printf("finalization is probably ok\n");
      }
      still_live = 0;
      for (i = 0; i < MAX_FINALIZED; i++) {
        if (live_indicators[i] != 0) {
          still_live++;
        }
#       ifndef GC_LONG_REFS_NOT_NEEDED
          if (live_long_refs[i] != NULL) {
            still_long_live++;
          }
#       endif
      }
      /* Objects neither finalized nor still flagged live are       */
      /* unaccounted for.                                           */
      i = finalizable_count - finalized_count - still_live;
      if (0 != i) {
        GC_printf("%d disappearing links remain and %d more objects "
                      "were not finalized\n", still_live, i);
        if (i > 10) {
          GC_printf("\tVery suspicious!\n");
        } else {
          GC_printf("\tSlightly suspicious, but probably OK\n");
        }
      }
#     ifndef GC_LONG_REFS_NOT_NEEDED
        if (0 != still_long_live) {
          GC_printf("%d 'long' links remain\n", still_long_live);
        }
#     endif
#   endif
    GC_printf("Total number of bytes allocated is %lu\n",
                  (unsigned long)GC_get_total_bytes());
    GC_printf("Final heap size is %lu bytes\n",
                  (unsigned long)GC_get_heap_size());
    /* Lower bound on allocation volume: too little means some of   */
    /* the test workload silently did not run.                      */
    if (GC_get_total_bytes() < (size_t)n_tests *
#   ifdef VERY_SMALL_CONFIG
        2700000
#   else
        33500000
#   endif
        ) {
      GC_printf("Incorrect execution - missed some allocations\n");
      FAIL;
    }
    if (GC_get_heap_size() + GC_get_unmapped_bytes() > max_heap_sz) {
      GC_printf("Unexpected heap growth - collector may be broken"
                " (heapsize: %lu, expected: %lu)\n",
                (unsigned long)(GC_get_heap_size() + GC_get_unmapped_bytes()),
                (unsigned long)max_heap_sz);
      FAIL;
    }

#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
      /* Get global counters (just to check the functions work). */
      GC_get_heap_usage_safe(NULL, NULL, NULL, NULL, NULL);
      {
        struct GC_prof_stats_s stats;
        (void)GC_get_prof_stats(&stats, sizeof(stats));
#       ifdef THREADS
          (void)GC_get_prof_stats_unsafe(&stats, sizeof(stats));
#       endif
      }
#   endif

#   ifdef THREADS
      GC_unregister_my_thread(); /* just to check it works (for main) */
#   endif
    GC_printf("Completed %u collections", (unsigned)GC_get_gc_no());
#   ifdef PARALLEL_MARK
      GC_printf(" (using %d marker threads)", GC_get_parallel() + 1);
#   endif
    GC_printf("\n" "Collector appears to work\n");
}
1546
#if defined(MACOS)
/* Classic Mac OS: grow the application stack to at least minSize   */
/* bytes by lowering ApplLimit, then maximize the application zone. */
void SetMinimumStack(long minSize)
{
        long newApplLimit;

        if (minSize > LMGetDefltStack())
        {
                newApplLimit = (long) GetApplLimit()
                                - (minSize - LMGetDefltStack());
                SetApplLimit((Ptr) newApplLimit);
                MaxApplZone();
        }
}

/* Stack space (in bytes) requested for the test on MacOS. */
#define cMinStackSpace (512L * 1024L)

#endif
1564
/* Warning hook installed via GC_set_warn_proc: msg is a            */
/* printf-style format supplied by the collector with a single      */
/* numeric argument p.  The FAIL is deliberately commented out,     */
/* so warnings are printed but do not abort the test.               */
void GC_CALLBACK warn_proc(char *msg, GC_word p)
{
    GC_printf(msg, (unsigned long)p);
    /*FAIL;*/
}
1570
/* WinCE under CE passes the command line as a wide string. */
#if defined(MSWINCE) && defined(UNDER_CE)
# define WINMAIN_LPTSTR LPWSTR
#else
# define WINMAIN_LPTSTR LPSTR
#endif
1576
/* Single-threaded (or LINT) entry point: WinMain on Win32/WinCE,   */
/* an RTEMS Init task, or a plain main() elsewhere.  Initializes    */
/* the collector, optionally enables incremental mode, runs the     */
/* test battery once, and checks the heap statistics.               */
#if !defined(PCR) && !defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) \
    || defined(LINT)
#if defined(MSWIN32) && !defined(__MINGW32__) || defined(MSWINCE)
  int APIENTRY WinMain(HINSTANCE instance GC_ATTR_UNUSED,
                       HINSTANCE prev GC_ATTR_UNUSED,
                       WINMAIN_LPTSTR cmd GC_ATTR_UNUSED,
                       int n GC_ATTR_UNUSED)
#elif defined(RTEMS)
# include <bsp.h>
# define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
# define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
# define CONFIGURE_RTEMS_INIT_TASKS_TABLE
# define CONFIGURE_MAXIMUM_TASKS 1
# define CONFIGURE_INIT
# define CONFIGURE_INIT_TASK_STACK_SIZE (64*1024)
# include <rtems/confdefs.h>
  rtems_task Init(rtems_task_argument ignord)
#else
  int main(void)
#endif
{
    n_tests = 0;
#   if defined(MACOS)
      /* Make sure we have lots and lots of stack space. */
      SetMinimumStack(cMinStackSpace);
      /* Cheat and let stdio initialize toolbox for us. */
      printf("Testing GC Macintosh port\n");
#   endif
    GC_COND_INIT();
    GC_set_warn_proc(warn_proc);
#   if (defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(GWW_VDB)) \
          && !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
      GC_enable_incremental();
      GC_printf("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        GC_printf("Emulating dirty bits with mprotect/signals\n");
#     else
#       ifdef PROC_VDB
          GC_printf("Reading dirty bits from /proc\n");
#       elif defined(GWW_VDB)
          GC_printf("Using GetWriteWatch-based implementation\n");
#       else
          GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
#       endif
#     endif
#   endif
    run_one_test();
    check_heap_stats();
#   ifndef MSWINCE
      fflush(stdout);
#   endif
#   ifdef MSWIN32
      GC_win32_free_heap();
#   endif
#   ifdef RTEMS
      exit(0);
#   else
      return(0);
#   endif
}
# endif
1638
1639 #if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
1640
thr_run_one_test(void * arg GC_ATTR_UNUSED)1641 DWORD __stdcall thr_run_one_test(void * arg GC_ATTR_UNUSED)
1642 {
1643 run_one_test();
1644 return 0;
1645 }
1646
#ifdef MSWINCE
/* Signaled by thr_window once the test window exists. */
HANDLE win_created_h;
HWND win_handle;

/* Window procedure for the WinCE test window: on WM_HIBERNATE,     */
/* collect and unmap as much memory as possible; tear down cleanly  */
/* on WM_CLOSE/WM_DESTROY; forward everything else to the default   */
/* procedure.                                                       */
LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam,
                             LPARAM lParam)
{
  LRESULT ret = 0;
  switch (uMsg) {
    case WM_HIBERNATE:
      GC_printf("Received WM_HIBERNATE, calling GC_gcollect\n");
      /* Force "unmap as much memory as possible" mode. */
      GC_gcollect_and_unmap();
      break;
    case WM_CLOSE:
      GC_printf("Received WM_CLOSE, closing window\n");
      DestroyWindow(hwnd);
      break;
    case WM_DESTROY:
      PostQuitMessage(0);
      break;
    default:
      ret = DefWindowProc(hwnd, uMsg, wParam, lParam);
      break;
  }
  return ret;
}
1674
/* Message-pump thread: register the window class, create the test  */
/* window, signal win_created_h so WinMain can proceed, then pump   */
/* messages until WM_QUIT.                                          */
DWORD __stdcall thr_window(void * arg GC_ATTR_UNUSED)
{
  WNDCLASS win_class = {
    CS_NOCLOSE,
    window_proc,
    0,
    0,
    GetModuleHandle(NULL),
    NULL,
    NULL,
    (HBRUSH)(COLOR_APPWORKSPACE+1),
    NULL,
    TEXT("GCtestWindow")
  };
  MSG msg;

  if (!RegisterClass(&win_class))
    FAIL;

  win_handle = CreateWindowEx(
    0,
    TEXT("GCtestWindow"),
    TEXT("GCtest"),
    0,
    CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
    NULL,
    NULL,
    GetModuleHandle(NULL),
    NULL);

  if (win_handle == NULL)
    FAIL;

  /* Tell the main thread the window now exists. */
  SetEvent(win_created_h);

  ShowWindow(win_handle, SW_SHOW);
  UpdateWindow(win_handle);

  while (GetMessage(&msg, NULL, 0, 0)) {
    TranslateMessage(&msg);
    DispatchMessage(&msg);
  }

  return 0;
}
#endif
1721
/* Win32/WinCE multi-threaded entry point: start NTHREADS worker    */
/* threads (plus the message-pump thread on WinCE), run the test    */
/* in this thread too, wait for everything, then check heap stats.  */
int APIENTRY WinMain(HINSTANCE instance GC_ATTR_UNUSED,
                     HINSTANCE prev GC_ATTR_UNUSED,
                     WINMAIN_LPTSTR cmd GC_ATTR_UNUSED,
                     int n GC_ATTR_UNUSED)
{
# if NTHREADS > 0
   HANDLE h[NTHREADS];
   int i;
# endif
# ifdef MSWINCE
    HANDLE win_thr_h;
# endif
  DWORD thread_id;
# if defined(GC_DLL) && !defined(GC_NO_THREADS_DISCOVERY) \
        && !defined(MSWINCE) && !defined(THREAD_LOCAL_ALLOC) \
        && !defined(PARALLEL_MARK)
    GC_use_threads_discovery();
                /* Test with implicit thread registration if possible. */
    GC_printf("Using DllMain to track threads\n");
# endif
  GC_COND_INIT();
# if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
    GC_enable_incremental();
# endif
  InitializeCriticalSection(&incr_cs);
  GC_set_warn_proc(warn_proc);
# ifdef MSWINCE
    win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (win_created_h == (HANDLE)NULL) {
      GC_printf("Event creation failed %d\n", (int)GetLastError());
      FAIL;
    }
    win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
    if (win_thr_h == (HANDLE)NULL) {
      GC_printf("Thread creation failed %d\n", (int)GetLastError());
      FAIL;
    }
    /* Block until the message-pump thread has created the window. */
    if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
      FAIL;
    CloseHandle(win_created_h);
# endif
# if NTHREADS > 0
   for (i = 0; i < NTHREADS; i++) {
    h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
    if (h[i] == (HANDLE)NULL) {
      GC_printf("Thread creation failed %d\n", (int)GetLastError());
      FAIL;
    }
   }
# endif /* NTHREADS > 0 */
  run_one_test();
# if NTHREADS > 0
   for (i = 0; i < NTHREADS; i++) {
    if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
      GC_printf("Thread wait failed %d\n", (int)GetLastError());
      FAIL;
    }
   }
# endif /* NTHREADS > 0 */
# ifdef MSWINCE
    PostMessage(win_handle, WM_CLOSE, 0, 0);
    if (WaitForSingleObject(win_thr_h, INFINITE) != WAIT_OBJECT_0)
      FAIL;
# endif
  check_heap_stats();
  return(0);
}

#endif /* GC_WIN32_THREADS */
1791
1792
#ifdef PCR
/* PCR entry point: fork two PCR threads running the test battery,  */
/* run it in this thread as well, join both (reporting but not      */
/* failing on errors), then verify heap statistics.                 */
int test(void)
{
    PCR_Th_T * th1;
    PCR_Th_T * th2;
    int code;

    n_tests = 0;
    /* GC_enable_incremental(); */
    GC_set_warn_proc(warn_proc);
    th1 = PCR_Th_Fork(run_one_test, 0);
    th2 = PCR_Th_Fork(run_one_test, 0);
    run_one_test();
    if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 1 failed\n");
    }
    if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 2 failed\n");
    }
    check_heap_stats();
    return(0);
}
#endif
1818
1819 #if defined(GC_PTHREADS)
thr_run_one_test(void * arg GC_ATTR_UNUSED)1820 void * thr_run_one_test(void * arg GC_ATTR_UNUSED)
1821 {
1822 run_one_test();
1823 return(0);
1824 }
1825
#ifdef GC_DEBUG
  /* Map direct GC_free calls below to the debug variant when the   */
  /* test is built with debugging allocation enabled.               */
# define GC_free GC_debug_free
#endif
1829
/* POSIX-threads entry point: set up thread attributes and stack    */
/* sizes where needed, optionally enable incremental collection,    */
/* spawn NTHREADS workers running the test battery, run it in this  */
/* thread too, join them all, and check heap statistics.            */
int main(void)
{
    pthread_t th[NTHREADS];
    pthread_attr_t attr;
    int code;
    int i;
#   ifdef GC_IRIX_THREADS
        /* Force a larger stack to be preallocated */
        /* Since the initial can't always grow later. */
        *((volatile char *)&code - 1024*1024) = 0;      /* Require 1 MB */
#   endif /* GC_IRIX_THREADS */
#   if defined(GC_HPUX_THREADS)
        /* Default stack size is too small, especially with the 64 bit ABI */
        /* Increase it. */
        if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
          GC_printf("pthread_default_stacksize_np failed\n");
        }
#   endif /* GC_HPUX_THREADS */
#   ifdef PTW32_STATIC_LIB
        pthread_win32_process_attach_np ();
        pthread_win32_thread_attach_np ();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY) \
        && !defined(DARWIN_DONT_PARSE_STACK) && !defined(THREAD_LOCAL_ALLOC)
      /* Test with the Darwin implicit thread registration. */
      GC_use_threads_discovery();
      GC_printf("Using Darwin task-threads-based world stop and push\n");
#   endif
    GC_COND_INIT();

    if ((code = pthread_attr_init(&attr)) != 0) {
      GC_printf("pthread_attr_init failed, error=%d\n", code);
      FAIL;
    }
#   if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
        || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS) \
        || defined(GC_OPENBSD_THREADS)
        if ((code = pthread_attr_setstacksize(&attr, 1000 * 1024)) != 0) {
          GC_printf("pthread_attr_setstacksize failed, error=%d\n", code);
          FAIL;
        }
#   endif
    n_tests = 0;
#   if (defined(MPROTECT_VDB)) && !defined(REDIRECT_MALLOC) \
        && !defined(MAKE_BACK_GRAPH) && !defined(USE_PROC_FOR_LIBRARIES) \
        && !defined(NO_INCREMENTAL)
      GC_enable_incremental();
      GC_printf("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        GC_printf("Emulating dirty bits with mprotect/signals\n");
#     else
#       ifdef PROC_VDB
          GC_printf("Reading dirty bits from /proc\n");
#       else
          GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
#       endif
#     endif
#   endif
    GC_set_warn_proc(warn_proc);
    if ((code = pthread_key_create(&fl_key, 0)) != 0) {
      GC_printf("Key creation failed %d\n", code);
      FAIL;
    }
    for (i = 0; i < NTHREADS; ++i) {
      if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
        GC_printf("Thread %d creation failed %d\n", i, code);
        FAIL;
      }
    }
    run_one_test();
    for (i = 0; i < NTHREADS; ++i) {
      if ((code = pthread_join(th[i], 0)) != 0) {
        GC_printf("Thread %d failed %d\n", i, code);
        FAIL;
      }
    }
    check_heap_stats();
    (void)fflush(stdout);
    (void)pthread_attr_destroy(&attr);
#   ifdef PTW32_STATIC_LIB
        pthread_win32_thread_detach_np ();
        pthread_win32_process_detach_np ();
#   endif
    return(0);
}
#endif /* GC_PTHREADS */
1916