1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
5 *
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 *
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
14 */
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
17 /* This must be compiled with the same flags used to build the */
18 /* GC. It uses GC internals to allow more precise results */
19 /* checking for some of the tests. */
20
21 # ifdef HAVE_CONFIG_H
22 # include "private/config.h"
23 # endif
24
25 # undef GC_BUILD
26
27 #if (defined(DBG_HDRS_ALL) || defined(MAKE_BACK_GRAPH)) && !defined(GC_DEBUG)
28 # define GC_DEBUG
29 #endif
30
31 #include "gc.h"
32
33 #ifndef NTHREADS /* Number of additional threads to fork. */
34 # define NTHREADS 5 /* excludes main thread, which also runs a test. */
35 /* Not respected by PCR test. */
36 #endif
37
38 # if defined(mips) && defined(SYSTYPE_BSD43)
39 /* MIPS RISCOS 4 */
40 # else
41 # include <stdlib.h>
42 # endif
43 # include <stdio.h>
44 # if defined(_WIN32_WCE) && !defined(__GNUC__)
45 # include <winbase.h>
46 /* # define assert ASSERT */
47 # else
48 # include <assert.h> /* Not normally used, but handy for debugging. */
49 # endif
50
51 # include "gc_typed.h"
52 # include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
53 /* some statistics and gcconfig.h. */
54
55 # if defined(MSWIN32) || defined(MSWINCE)
56 # include <windows.h>
57 # endif
58
59 # ifdef GC_DLL
60 # ifdef GC_PRINT_VERBOSE_STATS
61 # define GC_print_stats VERBOSE
62 # else
63 # define GC_print_stats 0 /* Not exported from DLL */
64 /* Redefine to 1 to generate output. */
65 # endif
66 # endif
67
68 # ifdef PCR
69 # include "th/PCR_ThCrSec.h"
70 # include "th/PCR_Th.h"
71 # define GC_printf printf
72 # endif
73
74 # if defined(GC_PTHREADS)
75 # include <pthread.h>
76 # endif
77
78 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
79 static CRITICAL_SECTION incr_cs;
80 # endif
81
82 # include <stdarg.h>
83
84 /* Call GC_INIT only on platforms on which we think we really need it, */
85 /* so that we can test automatic initialization on the rest. */
86 #if defined(CYGWIN32) || defined (AIX) || defined(DARWIN) \
87 || defined(THREAD_LOCAL_ALLOC) \
88 || (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
89 # define GC_COND_INIT() GC_INIT()
90 #else
91 # define GC_COND_INIT()
92 #endif
93
94 /* Allocation Statistics. Incremented without synchronization. */
95 /* FIXME: We should be using synchronization. */
/* One counter per allocation flavor exercised by the tests; the      */
/* final statistics report reads these back.                          */
int stubborn_count = 0;         /* GC_MALLOC_STUBBORN allocations.        */
int uncollectable_count = 0;    /* GC_MALLOC_UNCOLLECTABLE allocations.   */
int collectable_count = 0;      /* Ordinary GC_MALLOC allocations.        */
int atomic_count = 0;           /* Pointer-free (atomic) allocations.     */
int realloc_count = 0;          /* GC_REALLOC calls.                      */
101
#if defined(GC_AMIGA_FASTALLOC) && defined(AMIGA)

  /* Amiga fast-alloc support: release all GC-owned memory before     */
  /* aborting, so the OS gets its RAM back.                           */
  void GC_amiga_free_all_mem(void);
  void Amiga_Fail(void){GC_amiga_free_all_mem();abort();}
# define FAIL (void)Amiga_Fail()

  /* Typed-allocation wrapper: on failure, retry once after a forced  */
  /* collection (typed allocations are not directly supported with    */
  /* GC_AMIGA_FASTALLOC), and abort if that fails too.                */
  void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d){
    void *ret=GC_malloc_explicitly_typed(lb,d);
    if(ret==NULL){
      if(!GC_dont_gc){
        GC_gcollect();
        ret=GC_malloc_explicitly_typed(lb,d);
      }
      if(ret==NULL){
        GC_printf("Out of memory, (typed allocations are not directly "
                  "supported with the GC_AMIGA_FASTALLOC option.)\n");
        FAIL;
      }
    }
    return ret;
  }
  /* Same retry-once logic for the calloc-style typed allocator.      */
  void *GC_amiga_gctest_calloc_explicitly_typed(size_t a,size_t lb, GC_descr d){
    void *ret=GC_calloc_explicitly_typed(a,lb,d);
    if(ret==NULL){
      if(!GC_dont_gc){
        GC_gcollect();
        ret=GC_calloc_explicitly_typed(a,lb,d);
      }
      if(ret==NULL){
        GC_printf("Out of memory, (typed allocations are not directly "
                  "supported with the GC_AMIGA_FASTALLOC option.)\n");
        FAIL;
      }
    }
    return ret;
  }
  /* Route the typed allocators used by typed_test() through the      */
  /* retrying wrappers above.                                         */
# define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
# define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)

#else /* !AMIGA_FASTALLOC */

# ifdef PCR
#   define FAIL (void)abort()
# else
#   define FAIL ABORT("Test failed")
# endif

#endif /* !AMIGA_FASTALLOC */
149
150 /* AT_END may be defined to exercise the interior pointer test */
151 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
152 /* As it stands, this test should succeed with either */
153 /* configuration. In the FIND_LEAK configuration, it should */
154 /* find lots of leaks, since we free almost nothing. */
155
/* A Lisp-style cons cell: the building block of all the list tests. */
struct SEXPR {
    struct SEXPR * sexpr_car;
    struct SEXPR * sexpr_cdr;
};


typedef struct SEXPR * sexpr;
163
164 # define INT_TO_SEXPR(x) ((sexpr)(GC_word)(x))
165 # define SEXPR_TO_INT(x) ((int)(GC_word)(x))
166
167 # undef nil
168 # define nil (INT_TO_SEXPR(0))
169 # define car(x) ((x) -> sexpr_car)
170 # define cdr(x) ((x) -> sexpr_cdr)
171 # define is_nil(x) ((x) == nil)
172
173
174 int extra_count = 0; /* Amount of space wasted in cons node */
175
176 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
177 /* to test collector. */
# ifdef VERY_SMALL_CONFIG
#   define cons small_cons
# else
sexpr cons (sexpr x, sexpr y)
{
    sexpr r;
    int *p;
    int my_extra = extra_count;

    stubborn_count++;
    r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
    if (r == 0) {
        GC_printf("Out of memory\n");
        exit(1);
    }
    /* Freshly allocated objects must arrive zeroed; anything else    */
    /* means the collector reused memory that was still live.         */
    for (p = (int *)r;
         ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
        if (*p) {
            GC_printf("Found nonzero at %p - allocator is broken\n", p);
            FAIL;
        }
        /* Scribble a recognizable pattern encoding each word's offset. */
        *p = (int)((13 << 12) + ((p - (int *)r) & 0xfff));
    }
#   ifdef AT_END
      /* Exercise interior-pointer recognition: keep only a pointer   */
      /* near the end of the (overallocated) object.                  */
      r = (sexpr)((char *)r + (my_extra & ~7));
#   endif
    r -> sexpr_car = x;
    r -> sexpr_cdr = y;
    /* Vary the wasted space from call to call, wrapping before 5000. */
    my_extra++;
    if ( my_extra >= 5000 ) {
        extra_count = 0;
    } else {
        extra_count = my_extra;
    }
    GC_END_STUBBORN_CHANGE((char *)r);
    return(r);
}
# endif
216
#ifdef GC_GCJ_SUPPORT

#include "gc_mark.h"
#include "gc_gcj.h"

/* The following struct emulates the vtable in gcj. */
/* This assumes the default value of MARK_DESCR_OFFSET. */
struct fake_vtable {
    void * dummy;       /* class pointer in real gcj. */
    GC_word descr;      /* mark descriptor word.      */
};

/* Length-based descriptor: the whole object is scanned. */
struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
                                            + sizeof(struct fake_vtable *) };
                        /* length based descriptor. */
/* Bitmap-based descriptor: only the two sexpr fields are pointers. */
struct fake_vtable gcj_class_struct2 =
        { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
                        /* Bitmap based descriptor. */

/* Custom mark procedure for gcj-style objects: pushes the car and   */
/* cdr fields of the SEXPR payload that follows the vtable word.     */
/* env == 1 signals a debug-allocated object, whose payload starts   */
/* at the user pointer rather than the base pointer.                 */
struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
                                        struct GC_ms_entry *mark_stack_ptr,
                                        struct GC_ms_entry *mark_stack_limit,
                                        word env )
{
    sexpr x;
    if (1 == env) {
        /* Object allocated with debug allocator.       */
        addr = (word *)GC_USR_PTR_FROM_BASE(addr);
    }
    x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
    mark_stack_ptr = GC_MARK_AND_PUSH(
                              (void *)(x -> sexpr_cdr), mark_stack_ptr,
                              mark_stack_limit, (void * *)&(x -> sexpr_cdr));
    mark_stack_ptr = GC_MARK_AND_PUSH(
                              (void *)(x -> sexpr_car), mark_stack_ptr,
                              mark_stack_limit, (void * *)&(x -> sexpr_car));
    return(mark_stack_ptr);
}

#endif /* GC_GCJ_SUPPORT */
257
258
/* Build an ordinary collectable cons cell (x . y); aborts on OOM. */
sexpr small_cons (sexpr x, sexpr y)
{
    sexpr cell;

    collectable_count++;
    cell = (sexpr)GC_MALLOC(sizeof(struct SEXPR));
    if (0 == cell) {
        GC_printf("Out of memory\n");
        exit(1);
    }
    cell -> sexpr_car = x;
    cell -> sexpr_cdr = y;
    return cell;
}
273
/* Build an uncollectable cons cell whose cdr is stored disguised    */
/* (bit-complemented), so the collector cannot trace it; used to     */
/* verify that uncollectable objects survive anyway.                 */
sexpr small_cons_uncollectable (sexpr x, sexpr y)
{
    sexpr cell;

    uncollectable_count++;
    cell = (sexpr)GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
    if (0 == cell) {
        GC_printf("Out of memory\n");
        exit(1);
    }
    cell -> sexpr_car = x;
    cell -> sexpr_cdr = (sexpr)(~(GC_word)y);   /* disguised pointer */
    return cell;
}
288
289 #ifdef GC_GCJ_SUPPORT
290
291
/* gcj-style cons: allocate via GC_GCJ_MALLOC with a fake vtable     */
/* word preceding the SEXPR payload, marked by gcj_class_struct2.    */
sexpr gcj_cons(sexpr x, sexpr y)
{
    sexpr result;
    GC_word *base = (GC_word *)GC_GCJ_MALLOC(
                        sizeof(struct SEXPR) + sizeof(struct fake_vtable*),
                        &gcj_class_struct2);

    if (0 == base) {
        GC_printf("Out of memory\n");
        exit(1);
    }
    result = (sexpr)(base + 1);   /* payload follows the vtable word */
    result -> sexpr_car = x;
    result -> sexpr_cdr = y;
    return result;
}
309 #endif
310
311 /* Return reverse(x) concatenated with y */
sexpr reverse1(sexpr x, sexpr y)
{
    /* Walk x, pushing each element onto y; produces reverse(x) ++ y. */
    while (!is_nil(x)) {
        y = cons(car(x), y);
        x = cdr(x);
    }
    return y;
}
320
sexpr reverse(sexpr x)
{
    /* Optionally stress the system allocator alongside the GC. */
#   ifdef TEST_WITH_SYSTEM_MALLOC
      malloc(100000);
#   endif
    return reverse1(x, nil);
}
328
ints(int low,int up)329 sexpr ints(int low, int up)
330 {
331 if (low > up) {
332 return(nil);
333 } else {
334 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
335 }
336 }
337
338 #ifdef GC_GCJ_SUPPORT
339 /* Return reverse(x) concatenated with y */
sexpr gcj_reverse1(sexpr x, sexpr y)
{
    /* Same as reverse1, but with gcj-allocated cells. */
    while (!is_nil(x)) {
        y = gcj_cons(car(x), y);
        x = cdr(x);
    }
    return y;
}
348
sexpr gcj_reverse(sexpr x)
{
    /* Reverse x into a fresh gcj-allocated list. */
    return gcj_reverse1(x, nil);
}
353
gcj_ints(int low,int up)354 sexpr gcj_ints(int low, int up)
355 {
356 if (low > up) {
357 return(nil);
358 } else {
359 return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
360 }
361 }
362 #endif /* GC_GCJ_SUPPORT */
363
364 /* To check uncollectable allocation we build lists with disguised cdr */
365 /* pointers, and make sure they don't go away. */
uncollectable_ints(int low,int up)366 sexpr uncollectable_ints(int low, int up)
367 {
368 if (low > up) {
369 return(nil);
370 } else {
371 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
372 uncollectable_ints(low+1, up)));
373 }
374 }
375
/* Verify that list is exactly ((low) (low+1) ... (up)); abort if a  */
/* node was lost, corrupted, or the list is too long.                */
void check_ints(sexpr list, int low, int up)
{
    int expected;

    for (expected = low; ; expected++) {
        if (SEXPR_TO_INT(car(car(list))) != expected) {
            GC_printf(
               "List reversal produced incorrect list - collector is broken\n");
            FAIL;
        }
        if (expected == up) break;
        list = cdr(list);
    }
    if (cdr(list) != nil) {
        GC_printf("List too long - collector is broken\n");
        FAIL;
    }
}
392
393 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(GC_word)(cdr(x)))
394
/* Same as check_ints, but follows the disguised cdr pointers used   */
/* by the uncollectable lists.                                       */
void check_uncollectable_ints(sexpr list, int low, int up)
{
    int expected;

    for (expected = low; ; expected++) {
        if (SEXPR_TO_INT(car(car(list))) != expected) {
            GC_printf("Uncollectable list corrupted - collector is broken\n");
            FAIL;
        }
        if (expected == up) break;
        list = UNCOLLECTABLE_CDR(list);
    }
    if (UNCOLLECTABLE_CDR(list) != nil) {
        GC_printf("Uncollectable list too long - collector is broken\n");
        FAIL;
    }
}
410
411 /* Not used, but useful for debugging: */
/* Debug helper: print the integers in x as "(1), (2), ...\n", or    */
/* "NIL\n" for an empty list.                                        */
void print_int_list(sexpr x)
{
    while (!is_nil(x)) {
        GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
        if (is_nil(cdr(x))) {
            GC_printf("\n");
            return;
        }
        GC_printf(", ");
        x = cdr(x);
    }
    GC_printf("NIL\n");
}
426
427 /* ditto: */
/* Debug helper: like print_int_list, but annotates every node (and  */
/* unmarked car cells) with its mark-bit state.                      */
void check_marks_int_list(sexpr x)
{
    for (;;) {
        GC_printf(GC_is_marked((ptr_t)x) ? "[mkd:%p]" : "[unm:%p]", x);
        if (is_nil(x)) {
            GC_printf("NIL\n");
            return;
        }
        if (!GC_is_marked((ptr_t)car(x)))
            GC_printf("[unm car:%p]", car(x));
        GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
        if (is_nil(cdr(x))) {
            GC_printf("\n");
            return;
        }
        GC_printf(", ");
        x = cdr(x);
    }
}
448
449 /*
450 * A tiny list reversal test to check thread creation.
451 */
#ifdef THREADS

# ifdef VERY_SMALL_CONFIG
#   define TINY_REVERSE_UPPER_VALUE 4
# else
#   define TINY_REVERSE_UPPER_VALUE 10
# endif

/* Thread body for fork_a_thread(): reverse a small list a few times */
/* and verify it.  The signature differs between the Win32 and       */
/* pthreads thread-creation APIs.                                    */
# if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
  DWORD __stdcall tiny_reverse_test(void * arg)
# else
  void * tiny_reverse_test(void * arg)
# endif
{
    int i;
    for (i = 0; i < 5; ++i) {
        check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
                   1, TINY_REVERSE_UPPER_VALUE);
    }
    return 0;
}

# if defined(GC_PTHREADS)
    /* Spawn a short-lived pthread running tiny_reverse_test and     */
    /* join it; any failure aborts the whole test.                   */
    void fork_a_thread(void)
    {
      pthread_t t;
      int code;
      if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
        GC_printf("Small thread creation failed %d\n", code);
        FAIL;
      }
      if ((code = pthread_join(t, 0)) != 0) {
        GC_printf("Small thread join failed %d\n", code);
        FAIL;
      }
    }

# elif defined(GC_WIN32_THREADS)
    /* Same, using GC_CreateThread/WaitForSingleObject on Win32.     */
    void fork_a_thread(void)
    {
        DWORD thread_id;
        HANDLE h;
        h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
        if (h == (HANDLE)NULL) {
            GC_printf("Small thread creation failed %d\n",
                      (int)GetLastError());
            FAIL;
        }
        if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
            GC_printf("Small thread wait failed %d\n",
                      (int)GetLastError());
            FAIL;
        }
    }

# endif

#endif
510
/* Try to force a to be strangely aligned: the leading char member   */
/* pushes the sexpr field off its natural alignment.                 */
struct {
  char dummy;
  sexpr aa;
} A;
#define a A.aa
517
/*
 * Repeatedly reverse lists built out of very different sized cons cells,
 * and check that we didn't lose anything.  Entered via GC_do_blocking
 * with data == 0, in which case it re-invokes itself through
 * GC_call_with_gc_active so that this stack frame gets scanned.
 */
void *GC_CALLBACK reverse_test_inner(void *data)
{
    int i;
    sexpr b;
    sexpr c;
    sexpr d;
    sexpr e;
    sexpr *f, *g, *h;

    if (data == 0) {
      /* This stack frame is not guaranteed to be scanned. */
      return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
    }

    /* BIG is the longest test-list length; scaled down on platforms  */
    /* with small default stacks since the list code recurses.        */
# if /*defined(MSWIN32) ||*/ defined(MACOS)
    /* Win32S only allows 128K stacks */
#   define BIG 1000
# elif defined(PCR)
    /* PCR default stack is 100K.  Stack frames are up to 120 bytes. */
#   define BIG 700
# elif defined(MSWINCE) || defined(RTEMS)
    /* WinCE only allows 64K stacks */
#   define BIG 500
# elif defined(OSF1)
    /* OSF has limited stack space by default, and large frames. */
#   define BIG 200
# elif defined(__MACH__) && defined(__ppc64__)
#   define BIG 2500
# else
#   define BIG 4500
# endif

    A.dummy = 17;
    a = ints(1, 49);
    b = ints(1, 50);
    c = ints(1, BIG);
    d = uncollectable_ints(1, 100);
    e = uncollectable_ints(1, 1);
    /* Check that realloc updates object descriptors correctly */
    collectable_count++;
    f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
    realloc_count++;
    f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
    f[5] = ints(1,17);
    collectable_count++;
    g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
    realloc_count++;
    g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
    g[799] = ints(1,18);
    collectable_count++;
    h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
    realloc_count++;
    h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
# ifdef GC_GCJ_SUPPORT
      h[1999] = gcj_ints(1,200);
      for (i = 0; i < 51; ++i)
        h[1999] = gcj_reverse(h[1999]);
      /* Leave it as the reveresed list for now. */
# else
      h[1999] = ints(1,200);
# endif
    /* Try to force some collections and reuse of small list elements */
    for (i = 0; i < 10; i++) {
      (void)ints(1, BIG);
    }
    /* Superficially test interior pointer recognition on stack */
    c = (sexpr)((char *)c + sizeof(char *));
    d = (sexpr)((char *)d + sizeof(char *));

    GC_FREE((void *)e);

    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 50; i++) {
        check_ints(b,1,50);
        b = reverse(reverse(b));
    }
    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 60; i++) {
#       if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
            if (i % 10 == 0) fork_a_thread();
#       endif
        /* This maintains the invariant that a always points to a list */
        /* of 49 integers.  Thus this is thread safe without locks,    */
        /* assuming atomic pointer assignments.                        */
        a = reverse(reverse(a));
#       if !defined(AT_END) && !defined(THREADS)
          /* This is not thread safe, since realloc explicitly deallocates */
          if (i & 1) {
            a = (sexpr)GC_REALLOC((void *)a, 500);
          } else {
            a = (sexpr)GC_REALLOC((void *)a, 8200);
          }
#       endif
    }
    check_ints(a,1,49);
    check_ints(b,1,50);

    /* Restore c and d values. */
    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));

    check_ints(c,1,BIG);
    check_uncollectable_ints(d, 1, 100);
    check_ints(f[5], 1,17);
    check_ints(g[799], 1,18);
# ifdef GC_GCJ_SUPPORT
      h[1999] = gcj_reverse(h[1999]);
# endif
    check_ints(h[1999], 1,200);
# ifndef THREADS
    a = 0;
# endif
    /* Volatile stores keep the compiler from eliding the clears. */
    *(volatile void **)&b = 0;
    *(volatile void **)&c = 0;
    return 0;
}
640
/* Driver for the reversal test. */
void reverse_test(void)
{
    /* Run the actual body on a "blocked" stack, to exercise          */
    /* GC_do_blocking / GC_call_with_gc_active.                       */
    (void)GC_do_blocking(reverse_test_inner, 0);
}
646
647 #undef a
648
649 /*
650 * The rest of this builds balanced binary trees, checks that they don't
651 * disappear, and tests finalization.
652 */
/* A balanced binary tree node; level is the subtree height. */
typedef struct treenode {
    int level;
    struct treenode * lchild;
    struct treenode * rchild;
} tn;

int finalizable_count = 0;      /* Nodes registered for finalization.  */
int finalized_count = 0;        /* Nodes whose finalizer actually ran. */
volatile int dropped_something = 0;  /* Set once tree roots may die.   */
662
/* Finalizer registered for tree nodes by mktree().  client_data      */
/* carries the node's expected level; a mismatch means the collector  */
/* delivered wrong finalization data.  The shared count is updated    */
/* under whatever locking primitive the configuration provides.       */
void GC_CALLBACK finalizer(void * obj, void * client_data)
{
  tn * t = (tn *)obj;

# ifdef PCR
     PCR_ThCrSec_EnterSys();
# endif
# if defined(GC_PTHREADS)
    static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_lock(&incr_lock);
# elif defined(GC_WIN32_THREADS)
    EnterCriticalSection(&incr_cs);
# endif
  if ((int)(GC_word)client_data != t -> level) {
     GC_printf("Wrong finalization data - collector is broken\n");
     FAIL;
  }
  finalized_count++;
  t -> level = -1;      /* detect duplicate finalization immediately */
# ifdef PCR
    PCR_ThCrSec_ExitSys();
# endif
# if defined(GC_PTHREADS)
    pthread_mutex_unlock(&incr_lock);
# elif defined(GC_WIN32_THREADS)
    LeaveCriticalSection(&incr_cs);
# endif
}
691
/* Drives the pseudo-random choices in mktree() and chktree(). */
size_t counter = 0;

# define MAX_FINALIZED (NTHREADS*4000)

# if !defined(MACOS)
  /* One "live indicator" word per finalizable object, cleared via    */
  /* disappearing links when the corresponding object is collected.   */
  GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
#else
  /* Too big for THINK_C. have to allocate it dynamically. */
  GC_word *live_indicators = 0;
#endif

int live_indicators_count = 0;
704
/* Build a balanced binary tree of height n.  Every ~17th call swaps  */
/* grandchildren to vary the structure; every ~119th call registers   */
/* the node for finalization and exercises the disappearing-link API. */
tn * mktree(int n)
{
    tn * result = (tn *)GC_MALLOC(sizeof(tn));

    collectable_count++;
#   if defined(MACOS)
      /* get around static data limitations. */
      if (!live_indicators)
        live_indicators =
                    (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
      if (!live_indicators) {
        GC_printf("Out of memory\n");
        exit(1);
      }
#   endif
    if (n == 0) return(0);
    if (result == 0) {
        GC_printf("Out of memory\n");
        exit(1);
    }
    result -> level = n;
    result -> lchild = mktree(n-1);
    result -> rchild = mktree(n-1);
    if (counter++ % 17 == 0 && n >= 2) {
        /* Swap grandchildren to create some cross links. */
        tn * tmp = result -> lchild -> rchild;

        result -> lchild -> rchild = result -> rchild -> lchild;
        result -> rchild -> lchild = tmp;
    }
    if (counter++ % 119 == 0) {
        int my_index;

        {
          /* Update the shared counters under the configuration's     */
          /* locking primitive.                                       */
#         ifdef PCR
            PCR_ThCrSec_EnterSys();
#         endif
#         if defined(GC_PTHREADS)
            static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
            pthread_mutex_lock(&incr_lock);
#         elif defined(GC_WIN32_THREADS)
            EnterCriticalSection(&incr_cs);
#         endif
          /* Losing a count here causes erroneous report of failure. */
          finalizable_count++;
          my_index = live_indicators_count++;
#         ifdef PCR
            PCR_ThCrSec_ExitSys();
#         endif
#         if defined(GC_PTHREADS)
            pthread_mutex_unlock(&incr_lock);
#         elif defined(GC_WIN32_THREADS)
            LeaveCriticalSection(&incr_cs);
#         endif
        }

        GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
                              (GC_finalization_proc *)0, (void * *)0);
        if (my_index >= MAX_FINALIZED) {
            GC_printf("live_indicators overflowed\n");
            FAIL;
        }
        live_indicators[my_index] = 13;
        /* Register, unregister, and re-register a disappearing link  */
        /* to exercise both entry points.                             */
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
             (void * *)(&(live_indicators[my_index])),
             (void *)result) != 0) {
            GC_printf("GC_general_register_disappearing_link failed\n");
            FAIL;
        }
        if (GC_unregister_disappearing_link(
             (void * *)
             (&(live_indicators[my_index]))) == 0) {
            GC_printf("GC_unregister_disappearing_link failed\n");
            FAIL;
        }
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
             (void * *)(&(live_indicators[my_index])),
             (void *)result) != 0) {
            GC_printf("GC_general_register_disappearing_link failed 2\n");
            FAIL;
        }
        /* Keep result live across the registration calls. */
        GC_reachable_here(result);
    }
    return(result);
}
789
/* Verify that t is an intact tree of height n as built by mktree(),  */
/* interleaving occasional allocations to provoke collections.        */
void chktree(tn *t, int n)
{
    if (n == 0) {
        if (t != 0) {
            GC_printf("Clobbered a leaf - collector is broken\n");
            FAIL;
        }
        return;
    }
    if (t -> level != n) {
        GC_printf("Lost a node at level %d - collector is broken\n", n);
        FAIL;
    }
    if (counter++ % 373 == 0) {
        collectable_count++;
        (void)GC_MALLOC(counter % 5001);
    }
    chktree(t -> lchild, n - 1);
    if (counter++ % 73 == 0) {
        collectable_count++;
        (void)GC_MALLOC(counter % 373);
    }
    chktree(t -> rchild, n - 1);
}
812
813
#if defined(GC_PTHREADS)
/* Key holding each thread's private free list of 8-byte objects. */
pthread_key_t fl_key;

/* Allocate an 8-byte object from a thread-local free list            */
/* replenished via GC_malloc_many (except in small/debug builds,      */
/* which fall back to a plain GC_MALLOC).                             */
void * alloc8bytes(void)
{
# if defined(SMALL_CONFIG) || defined(GC_DEBUG)
    collectable_count++;
    return(GC_MALLOC(8));
# else
    void ** my_free_list_ptr;
    void * my_free_list;

    my_free_list_ptr = (void **)pthread_getspecific(fl_key);
    if (my_free_list_ptr == 0) {
        /* First allocation in this thread: set up its list head. */
        uncollectable_count++;
        my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
        if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
            GC_printf("pthread_setspecific failed\n");
            FAIL;
        }
    }
    my_free_list = *my_free_list_ptr;
    if (my_free_list == 0) {
        /* Refill the local list with a batch of 8-byte objects. */
        my_free_list = GC_malloc_many(8);
        if (my_free_list == 0) {
            GC_printf("alloc8bytes out of memory\n");
            FAIL;
        }
    }
    /* Unlink the first object and hand it out. */
    *my_free_list_ptr = GC_NEXT(my_free_list);
    GC_NEXT(my_free_list) = 0;
    collectable_count++;
    return(my_free_list);
# endif
}

#else
# define alloc8bytes() GC_MALLOC_ATOMIC(8)
#endif
853
alloc_small(int n)854 void alloc_small(int n)
855 {
856 int i;
857
858 for (i = 0; i < n; i += 8) {
859 atomic_count++;
860 if (alloc8bytes() == 0) {
861 GC_printf("Out of memory\n");
862 FAIL;
863 }
864 }
865 }
866
867 # if defined(THREADS) && defined(GC_DEBUG)
868 # ifdef VERY_SMALL_CONFIG
869 # define TREE_HEIGHT 12
870 # else
871 # define TREE_HEIGHT 15
872 # endif
873 # else
874 # ifdef VERY_SMALL_CONFIG
875 # define TREE_HEIGHT 13
876 # else
877 # define TREE_HEIGHT 16
878 # endif
879 # endif
/* Build, check, and drop binary trees of decreasing height,          */
/* interleaved with many small allocations, to exercise both tracing  */
/* and finalization.                                                  */
void tree_test(void)
{
    tn * root;
    int i;

    root = mktree(TREE_HEIGHT);
#   ifndef VERY_SMALL_CONFIG
      alloc_small(5000000);
#   endif
    chktree(root, TREE_HEIGHT);
    /* Nothing has been dropped yet, so no finalizer should have run. */
    if (finalized_count && ! dropped_something) {
        GC_printf("Premature finalization - collector is broken\n");
        FAIL;
    }
    dropped_something = 1;
    GC_noop1((word)root);       /* Root needs to remain live until    */
                                /* dropped_something is set.          */
    root = mktree(TREE_HEIGHT);
    chktree(root, TREE_HEIGHT);
    for (i = TREE_HEIGHT; i >= 0; i--) {
        root = mktree(i);
        chktree(root, i);
    }
#   ifndef VERY_SMALL_CONFIG
      alloc_small(5000000);
#   endif
}
907
/* Number of completed test passes (across all threads). */
unsigned n_tests = 0;

/* Bitmap with the low 312 bits set, used to build the "huge" typed   */
/* descriptor d4 in typed_test() below.                               */
GC_word bm_huge[10] = {
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0x00ffffff,
};
922
/* A very simple test of explicitly typed allocation: build a long    */
/* chain of typed objects using several descriptor flavors, then walk */
/* it to verify that no link was collected or corrupted.              */
void typed_test(void)
{
    GC_word * old, * new;
    GC_word bm3 = 0x3;          /* Pointers in words 0 and 1.          */
    GC_word bm2 = 0x2;          /* Pointer in word 1 only.             */
    GC_word bm_large = 0xf7ff7fff;
    GC_descr d1 = GC_make_descriptor(&bm3, 2);
    GC_descr d2 = GC_make_descriptor(&bm2, 2);
    GC_descr d3 = GC_make_descriptor(&bm_large, 32);
    GC_descr d4 = GC_make_descriptor(bm_huge, 320);
    GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
    int i;

#   ifndef LINT
      /* Intentionally leaked descriptor; exercises descriptor reuse. */
      (void)GC_make_descriptor(&bm_large, 32);
#   endif
    collectable_count++;
    old = 0;
    for (i = 0; i < 4000; i++) {
        collectable_count++;
        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
        if (0 != new[0] || 0 != new[1]) {
            GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
            FAIL;
        }
        /* Each object stores 17 in word 0 and the chain link in word 1. */
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        collectable_count++;
        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        collectable_count++;
        new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        collectable_count++;
        new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
                                                     d1);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        collectable_count++;
        if (i & 0xff) {
          new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
                                                       d2);
        } else {
          new = (GC_word *) GC_calloc_explicitly_typed(1001,
                                                       3 * sizeof(GC_word),
                                                       d2);
          if (0 != new[0] || 0 != new[1]) {
            GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
            FAIL;
          }
        }
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
    }
    /* Walk the chain: 5 objects per iteration above => 20000 links. */
    for (i = 0; i < 20000; i++) {
        if (new[0] != 17) {
            GC_printf("typed alloc failed at %lu\n", (unsigned long)i);
            FAIL;
        }
        new[0] = 0;
        old = new;
        new = (GC_word *)(old[1]);
    }
    GC_gcollect();
    GC_noop1((word)x);  /* Keep x (and thus d4's object) live to here. */
}
997
998 int fail_count = 0;
999
1000 /*ARGSUSED*/
fail_proc1(void * x)1001 void GC_CALLBACK fail_proc1(void * x)
1002 {
1003 fail_count++;
1004 }
1005
uniq(void * p,...)1006 static void uniq(void *p, ...) {
1007 va_list a;
1008 void *q[100];
1009 int n = 0, i, j;
1010 q[n++] = p;
1011 va_start(a,p);
1012 for (;(q[n] = va_arg(a,void *)) != NULL;n++) ;
1013 va_end(a);
1014 for (i=0; i<n; i++)
1015 for (j=0; j<i; j++)
1016 if (q[i] == q[j]) {
1017 GC_printf(
1018 "Apparently failed to mark from some function arguments.\n"
1019 "Perhaps GC_push_regs was configured incorrectly?\n"
1020 );
1021 FAIL;
1022 }
1023 }
1024
1025 #ifdef THREADS
1026 # define TEST_FAIL_COUNT(n) 1
1027 #else
1028 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
1029 #endif
1030
inc_int_counter(void * pcounter)1031 void * GC_CALLBACK inc_int_counter(void *pcounter)
1032 {
1033 ++(*(int *)pcounter);
1034 return NULL;
1035 }
1036
run_one_test(void)1037 void run_one_test(void)
1038 {
1039 # ifndef DBG_HDRS_ALL
1040 char *x;
1041 char **z;
1042 # ifdef LINT
1043 char *y = 0;
1044 # else
1045 char *y = (char *)(GC_word)fail_proc1;
1046 # endif
1047 CLOCK_TYPE typed_time;
1048 # endif
1049 CLOCK_TYPE start_time;
1050 CLOCK_TYPE reverse_time;
1051 CLOCK_TYPE tree_time;
1052 unsigned long time_diff;
1053
1054 # ifdef FIND_LEAK
1055 GC_printf(
1056 "This test program is not designed for leak detection mode\n");
1057 GC_printf("Expect lots of problems\n");
1058 # endif
1059 GC_FREE(0);
1060 # ifndef DBG_HDRS_ALL
1061 collectable_count += 3;
1062 if ((GC_size(GC_malloc(7)) != 8 &&
1063 GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
1064 || GC_size(GC_malloc(15)) != 16) {
1065 GC_printf("GC_size produced unexpected results\n");
1066 FAIL;
1067 }
1068 collectable_count += 1;
1069 if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
1070 GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
1071 (unsigned long)GC_size(GC_malloc(0)));
1072 FAIL;
1073 }
1074 collectable_count += 1;
1075 if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
1076 GC_printf("GC_malloc_uncollectable(0) failed\n");
1077 FAIL;
1078 }
1079 GC_is_valid_displacement_print_proc = fail_proc1;
1080 GC_is_visible_print_proc = fail_proc1;
1081 collectable_count += 1;
1082 x = GC_malloc(16);
1083 if (GC_base(GC_PTR_ADD(x, 13)) != x) {
1084 GC_printf("GC_base(heap ptr) produced incorrect result\n");
1085 FAIL;
1086 }
1087 (void)GC_PRE_INCR(x, 0);
1088 (void)GC_POST_INCR(x);
1089 (void)GC_POST_DECR(x);
1090 if (GC_base(x) != x) {
1091 GC_printf("Bad INCR/DECR result\n");
1092 FAIL;
1093 }
1094 # ifndef PCR
1095 if (GC_base(y) != 0) {
1096 GC_printf("GC_base(fn_ptr) produced incorrect result\n");
1097 FAIL;
1098 }
1099 # endif
1100 if (GC_same_obj(x+5, x) != x + 5) {
1101 GC_printf("GC_same_obj produced incorrect result\n");
1102 FAIL;
1103 }
1104 if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
1105 GC_printf("GC_is_visible produced incorrect result\n");
1106 FAIL;
1107 }
1108 z = GC_malloc(8);
1109 GC_PTR_STORE(z, x);
1110 if (*z != x) {
1111 GC_printf("GC_PTR_STORE failed: %p != %p\n", *z, x);
1112 FAIL;
1113 }
1114 if (!TEST_FAIL_COUNT(1)) {
1115 # if!(defined(POWERPC) || defined(IA64)) || defined(M68K)
1116 /* On POWERPCs function pointers point to a descriptor in the */
1117 /* data segment, so there should have been no failures. */
1118 /* The same applies to IA64. Something similar seems to */
1119 /* be going on with NetBSD/M68K. */
1120 GC_printf("GC_is_visible produced wrong failure indication\n");
1121 FAIL;
1122 # endif
1123 }
1124 if (GC_is_valid_displacement(y) != y
1125 || GC_is_valid_displacement(x) != x
1126 || GC_is_valid_displacement(x + 3) != x + 3) {
1127 GC_printf("GC_is_valid_displacement produced incorrect result\n");
1128 FAIL;
1129 }
1130 {
1131 size_t i;
1132
1133 GC_malloc(17);
1134 for (i = sizeof(GC_word); i < 512; i *= 2) {
1135 GC_word result = (GC_word) GC_memalign(i, 17);
1136 if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
1137 }
1138 }
1139 # ifndef ALL_INTERIOR_POINTERS
1140 # if defined(RS6000) || defined(POWERPC)
1141 if (!TEST_FAIL_COUNT(1))
1142 # else
1143 if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2))
1144 # endif
1145 {
1146 GC_printf(
1147 "GC_is_valid_displacement produced wrong failure indication\n");
1148 FAIL;
1149 }
1150 # endif
1151 # endif /* DBG_HDRS_ALL */
1152 /* Test floating point alignment */
1153 collectable_count += 2;
1154 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1155 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1156 /* Test size 0 allocation a bit more */
1157 {
1158 size_t i;
1159 for (i = 0; i < 10000; ++i) {
1160 GC_MALLOC(0);
1161 GC_FREE(GC_MALLOC(0));
1162 GC_MALLOC_ATOMIC(0);
1163 GC_FREE(GC_MALLOC_ATOMIC(0));
1164 }
1165 }
1166 # ifdef GC_GCJ_SUPPORT
1167 GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
1168 GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
1169 # endif
1170 /* Make sure that fn arguments are visible to the collector. */
1171 uniq(
1172 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1173 (GC_gcollect(),GC_malloc(12)),
1174 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1175 (GC_gcollect(),GC_malloc(12)),
1176 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1177 (GC_gcollect(),GC_malloc(12)),
1178 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1179 (GC_gcollect(),GC_malloc(12)),
1180 GC_malloc(12), GC_malloc(12), GC_malloc(12),
1181 (GC_gcollect(),GC_malloc(12)),
1182 (void *)0);
1183 /* GC_malloc(0) must return NULL or something we can deallocate. */
1184 GC_free(GC_malloc(0));
1185 GC_free(GC_malloc_atomic(0));
1186 GC_free(GC_malloc(0));
1187 GC_free(GC_malloc_atomic(0));
1188 /* Repeated list reversal test. */
1189 GET_TIME(start_time);
1190 reverse_test();
1191 if (GC_print_stats) {
1192 GET_TIME(reverse_time);
1193 time_diff = MS_TIME_DIFF(reverse_time, start_time);
1194 GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
1195 (unsigned) time_diff, &start_time);
1196 }
1197 # ifndef DBG_HDRS_ALL
1198 typed_test();
1199 if (GC_print_stats) {
1200 GET_TIME(typed_time);
1201 time_diff = MS_TIME_DIFF(typed_time, start_time);
1202 GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
1203 (unsigned) time_diff, &start_time);
1204 }
1205 # endif /* DBG_HDRS_ALL */
1206 tree_test();
1207 if (GC_print_stats) {
1208 GET_TIME(tree_time);
1209 time_diff = MS_TIME_DIFF(tree_time, start_time);
1210 GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
1211 (unsigned) time_diff, &start_time);
1212 }
1213 /* Run reverse_test a second time, so we hopefully notice corruption. */
1214 reverse_test();
1215 if (GC_print_stats) {
1216 GET_TIME(reverse_time);
1217 time_diff = MS_TIME_DIFF(reverse_time, start_time);
1218 GC_log_printf(
1219 "-------------Finished second reverse_test at time %u (%p)\n",
1220 (unsigned)time_diff, &start_time);
1221 }
1222 /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
1223 /* AO_fetch_and_add1() may be unavailable to update a counter. */
1224 (void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
1225 # if defined(THREADS) && defined(HANDLE_FORK)
1226 if (fork() == 0) {
1227 GC_gcollect();
1228 tiny_reverse_test(0);
1229 GC_gcollect();
1230 if (GC_print_stats)
1231 GC_log_printf("Finished a child process\n");
1232 exit(0);
1233 }
1234 # endif
1235 if (GC_print_stats)
1236 GC_log_printf("Finished %p\n", &start_time);
1237 }
1238
1239 #define NUMBER_ROUND_UP(v, bound) ((((v) + (bound) - 1) / (bound)) * (bound))
1240
check_heap_stats(void)1241 void check_heap_stats(void)
1242 {
1243 size_t max_heap_sz;
1244 int i;
1245 int still_live;
1246 # ifdef FINALIZE_ON_DEMAND
1247 int late_finalize_count = 0;
1248 # endif
1249
1250 # ifdef VERY_SMALL_CONFIG
1251 /* The upper bounds are a guess, which has been empirically */
1252 /* adjusted. On low end uniprocessors with incremental GC */
1253 /* these may be particularly dubious, since empirically the */
1254 /* heap tends to grow largely as a result of the GC not */
1255 /* getting enough cycles. */
1256 # if CPP_WORDSZ == 64
1257 max_heap_sz = 4500000;
1258 # else
1259 max_heap_sz = 2800000;
1260 # endif
1261 # else
1262 # if CPP_WORDSZ == 64
1263 max_heap_sz = 23000000;
1264 # else
1265 max_heap_sz = 15000000;
1266 # endif
1267 # endif
1268 # ifdef GC_DEBUG
1269 max_heap_sz *= 2;
1270 # ifdef SAVE_CALL_CHAIN
1271 max_heap_sz *= 3;
1272 # ifdef SAVE_CALL_COUNT
1273 max_heap_sz += max_heap_sz * SAVE_CALL_COUNT/4;
1274 # endif
1275 # endif
1276 # endif
1277 max_heap_sz *= n_tests;
1278 # ifdef USE_MMAP
1279 max_heap_sz = NUMBER_ROUND_UP(max_heap_sz, 4 * 1024 * 1024);
1280 # endif
1281 /* Garbage collect repeatedly so that all inaccessible objects */
1282 /* can be finalized. */
1283 while (GC_collect_a_little()) { }
1284 for (i = 0; i < 16; i++) {
1285 GC_gcollect();
1286 # ifdef FINALIZE_ON_DEMAND
1287 late_finalize_count +=
1288 # endif
1289 GC_invoke_finalizers();
1290 }
1291 if (GC_print_stats) {
1292 GC_log_printf("Primordial thread stack bottom: %p\n",
1293 GC_stackbottom);
1294 }
1295 GC_printf("Completed %u tests\n", n_tests);
1296 GC_printf("Allocated %d collectable objects\n", collectable_count);
1297 GC_printf("Allocated %d uncollectable objects\n",
1298 uncollectable_count);
1299 GC_printf("Allocated %d atomic objects\n", atomic_count);
1300 GC_printf("Allocated %d stubborn objects\n", stubborn_count);
1301 GC_printf("Finalized %d/%d objects - ",
1302 finalized_count, finalizable_count);
1303 # ifdef FINALIZE_ON_DEMAND
1304 if (finalized_count != late_finalize_count) {
1305 GC_printf("Demand finalization error\n");
1306 FAIL;
1307 }
1308 # endif
1309 if (finalized_count > finalizable_count
1310 || finalized_count < finalizable_count/2) {
1311 GC_printf("finalization is probably broken\n");
1312 FAIL;
1313 } else {
1314 GC_printf("finalization is probably ok\n");
1315 }
1316 still_live = 0;
1317 for (i = 0; i < MAX_FINALIZED; i++) {
1318 if (live_indicators[i] != 0) {
1319 still_live++;
1320 }
1321 }
1322 i = finalizable_count - finalized_count - still_live;
1323 if (0 != i) {
1324 GC_printf("%d disappearing links remain and %d more objects "
1325 "were not finalized\n", still_live, i);
1326 if (i > 10) {
1327 GC_printf("\tVery suspicious!\n");
1328 } else {
1329 GC_printf("\tSlightly suspicious, but probably OK\n");
1330 }
1331 }
1332 GC_printf("Total number of bytes allocated is %lu\n",
1333 (unsigned long)GC_get_total_bytes());
1334 GC_printf("Final heap size is %lu bytes\n",
1335 (unsigned long)GC_get_heap_size());
1336 if (GC_get_total_bytes() < n_tests *
1337 # ifdef VERY_SMALL_CONFIG
1338 2700000
1339 # else
1340 33500000
1341 # endif
1342 ) {
1343 GC_printf("Incorrect execution - missed some allocations\n");
1344 FAIL;
1345 }
1346 if (GC_get_heap_size() + GC_get_unmapped_bytes() > max_heap_sz) {
1347 GC_printf("Unexpected heap growth - collector may be broken"
1348 " (heapsize: %lu, expected: %u)\n",
1349 (unsigned long)(GC_get_heap_size() + GC_get_unmapped_bytes()),
1350 max_heap_sz);
1351 FAIL;
1352 }
1353 # ifdef THREADS
1354 GC_unregister_my_thread(); /* just to check it works (for main) */
1355 # endif
1356 GC_printf("Collector appears to work\n");
1357 }
1358
1359 #if defined(MACOS)
SetMinimumStack(long minSize)1360 void SetMinimumStack(long minSize)
1361 {
1362 long newApplLimit;
1363
1364 if (minSize > LMGetDefltStack())
1365 {
1366 newApplLimit = (long) GetApplLimit()
1367 - (minSize - LMGetDefltStack());
1368 SetApplLimit((Ptr) newApplLimit);
1369 MaxApplZone();
1370 }
1371 }
1372
1373 #define cMinStackSpace (512L * 1024L)
1374
1375 #endif
1376
/* Warning callback installed via GC_set_warn_proc(): echo the       */
/* collector's warning rather than aborting the test.  msg is a      */
/* printf-style format supplied by the collector taking the single   */
/* argument p.                                                       */
void GC_CALLBACK warn_proc(char *msg, GC_word p)
{
    GC_printf(msg, (unsigned long)p);
    /*FAIL;*/
}
1382
1383 #if defined(MSWINCE) && defined(UNDER_CE)
1384 # define WINMAIN_LPTSTR LPWSTR
1385 #else
1386 # define WINMAIN_LPTSTR LPSTR
1387 #endif
1388
#if !defined(PCR) && !defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) \
    || defined(LINT)
/* Single-threaded entry point: initialize the collector, optionally */
/* enable incremental collection, run one round of tests, and check  */
/* the resulting heap statistics.  The actual entry-point signature  */
/* depends on the target platform (Win32 GUI, RTEMS, or hosted C).   */
#if defined(MSWIN32) && !defined(__MINGW32__) || defined(MSWINCE)
  int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev,
                       WINMAIN_LPTSTR cmd, int n)
#elif defined(RTEMS)
# include <bsp.h>
# define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
# define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
# define CONFIGURE_RTEMS_INIT_TASKS_TABLE
# define CONFIGURE_MAXIMUM_TASKS 1
# define CONFIGURE_INIT
# define CONFIGURE_INIT_TASK_STACK_SIZE (64*1024)
# include <rtems/confdefs.h>
  rtems_task Init(rtems_task_argument ignord)
#else
  int main(void)
#endif
{
    n_tests = 0;
#   if defined(MACOS)
      /* Make sure we have lots and lots of stack space. */
      SetMinimumStack(cMinStackSpace);
      /* Cheat and let stdio initialize toolbox for us. */
      printf("Testing GC Macintosh port\n");
#   endif
    GC_COND_INIT();
    GC_set_warn_proc(warn_proc);
    /* Switch to incremental mode where a virtual-dirty-bit          */
    /* implementation is available and not explicitly disabled.      */
#   if (defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(GWW_VDB)) \
       && !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
      GC_enable_incremental();
      GC_printf("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        GC_printf("Emulating dirty bits with mprotect/signals\n");
#     else
#       ifdef PROC_VDB
          GC_printf("Reading dirty bits from /proc\n");
#       elif defined(GWW_VDB)
          GC_printf("Using GetWriteWatch-based implementation\n");
#       else
          GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
#       endif
#     endif
#   endif
    run_one_test();
    check_heap_stats();
#   ifndef MSWINCE
      fflush(stdout);
#   endif
#   ifdef LINT
      /* Entry points we should be testing, but aren't.                  */
      /* Some can be tested by defining GC_DEBUG at the top of this file */
      /* This is a bit SunOS4 specific.                                  */
      GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
              GC_register_disappearing_link,
              GC_register_finalizer_ignore_self,
              GC_debug_register_displacement, GC_debug_change_stubborn,
              GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
              GC_debug_free, GC_debug_realloc,
              GC_generic_malloc_words_small, GC_init,
              GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
              GC_set_max_heap_size, GC_get_bytes_since_gc,
              GC_get_total_bytes, GC_pre_incr, GC_post_incr);
#   endif
#   ifdef MSWIN32
      GC_win32_free_heap();
#   endif
    return(0);
}
# endif
1459
1460 #if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
1461
thr_run_one_test(void * arg)1462 DWORD __stdcall thr_run_one_test(void *arg)
1463 {
1464 run_one_test();
1465 return 0;
1466 }
1467
1468 #ifdef MSWINCE
1469 HANDLE win_created_h;
1470 HWND win_handle;
1471
window_proc(HWND hwnd,UINT uMsg,WPARAM wParam,LPARAM lParam)1472 LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
1473 {
1474 LRESULT ret = 0;
1475 switch (uMsg) {
1476 case WM_HIBERNATE:
1477 GC_printf("Received WM_HIBERNATE, calling GC_gcollect\n");
1478 /* Force "unmap as much memory as possible" mode. */
1479 GC_gcollect_and_unmap();
1480 break;
1481 case WM_CLOSE:
1482 GC_printf("Received WM_CLOSE, closing window\n");
1483 DestroyWindow(hwnd);
1484 break;
1485 case WM_DESTROY:
1486 PostQuitMessage(0);
1487 break;
1488 default:
1489 ret = DefWindowProc(hwnd, uMsg, wParam, lParam);
1490 break;
1491 }
1492 return ret;
1493 }
1494
/* Window thread (WinCE): register a window class, create and show   */
/* the test window, signal win_created_h so WinMain can proceed,     */
/* then pump messages until window_proc posts WM_QUIT.               */
DWORD __stdcall thr_window(void *arg)
{
    WNDCLASS win_class = {
        CS_NOCLOSE,
        window_proc,
        0,                              /* cbClsExtra */
        0,                              /* cbWndExtra */
        GetModuleHandle(NULL),
        NULL,                           /* hIcon */
        NULL,                           /* hCursor */
        (HBRUSH)(COLOR_APPWORKSPACE+1),
        NULL,                           /* lpszMenuName */
        TEXT("GCtestWindow")
    };
    MSG msg;

    if (!RegisterClass(&win_class))
        FAIL;

    win_handle = CreateWindowEx(
        0,
        TEXT("GCtestWindow"),
        TEXT("GCtest"),
        0,
        CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
        NULL,
        NULL,
        GetModuleHandle(NULL),
        NULL);

    if (win_handle == NULL)
        FAIL;

    /* Let the main thread know the window exists before it starts   */
    /* the worker threads.                                           */
    SetEvent(win_created_h);

    ShowWindow(win_handle, SW_SHOW);
    UpdateWindow(win_handle);

    /* Standard message loop; GetMessage returns 0 on WM_QUIT.       */
    while (GetMessage(&msg, NULL, 0, 0)) {
        TranslateMessage(&msg);
        DispatchMessage(&msg);
    }

    return 0;
}
1540 #endif
1541
/* Win32-threads entry point: spawn NTHREADS worker threads (plus a  */
/* message-pump window thread on WinCE), run the test in the main    */
/* thread as well, wait for all workers, then check heap statistics. */
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev,
                     WINMAIN_LPTSTR cmd, int n)
{
#   if NTHREADS > 0
      HANDLE h[NTHREADS];       /* worker thread handles */
      int i;
#   endif
#   ifdef MSWINCE
      HANDLE win_thr_h;         /* window (message pump) thread */
#   endif
    DWORD thread_id;
#   if defined(GC_DLL) && !defined(GC_NO_THREADS_DISCOVERY) \
       && !defined(MSWINCE) && !defined(THREAD_LOCAL_ALLOC) \
       && !defined(PARALLEL_MARK)
      GC_use_threads_discovery();
      /* Test with implicit thread registration if possible. */
      GC_printf("Using DllMain to track threads\n");
#   endif
    GC_COND_INIT();
#   if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
      GC_enable_incremental();
#   endif
    InitializeCriticalSection(&incr_cs);
    GC_set_warn_proc(warn_proc);
#   ifdef MSWINCE
      /* Create the test window on its own thread and wait until it  */
      /* signals creation before starting the worker threads.        */
      win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
      if (win_created_h == (HANDLE)NULL) {
        GC_printf("Event creation failed %d\n", (int)GetLastError());
        FAIL;
      }
      win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
      if (win_thr_h == (HANDLE)NULL) {
        GC_printf("Thread creation failed %d\n", (int)GetLastError());
        FAIL;
      }
      if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
        FAIL;
      CloseHandle(win_created_h);
#   endif
#   if NTHREADS > 0
      for (i = 0; i < NTHREADS; i++) {
        h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
        if (h[i] == (HANDLE)NULL) {
          GC_printf("Thread creation failed %d\n", (int)GetLastError());
          FAIL;
        }
      }
#   endif /* NTHREADS > 0 */
    run_one_test();
#   if NTHREADS > 0
      /* Wait for every worker to finish before checking statistics. */
      for (i = 0; i < NTHREADS; i++) {
        if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
          GC_printf("Thread wait failed %d\n", (int)GetLastError());
          FAIL;
        }
      }
#   endif /* NTHREADS > 0 */
#   ifdef MSWINCE
      /* Ask the window to close and wait for its thread to exit.    */
      PostMessage(win_handle, WM_CLOSE, 0, 0);
      if (WaitForSingleObject(win_thr_h, INFINITE) != WAIT_OBJECT_0)
        FAIL;
#   endif
    check_heap_stats();
    return(0);
}
1607
1608 #endif /* GC_WIN32_THREADS */
1609
1610
1611 #ifdef PCR
/* PCR (Portable Common Runtime) driver: run the test in the main    */
/* thread plus two forked PCR threads, then verify heap statistics.  */
int test(void)
{
    PCR_Th_T * th1;
    PCR_Th_T * th2;
    int code;

    n_tests = 0;
    /* GC_enable_incremental(); */
    GC_set_warn_proc(warn_proc);
    th1 = PCR_Th_Fork(run_one_test, 0);
    th2 = PCR_Th_Fork(run_one_test, 0);
    run_one_test();
    /* Join both helper threads.  A join failure or nonzero exit     */
    /* code is reported but does not FAIL - NOTE(review): looks like */
    /* deliberate best-effort reporting; confirm before tightening.  */
    if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 1 failed\n");
    }
    if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 2 failed\n");
    }
    check_heap_stats();
    return(0);
}
1635 #endif
1636
1637 #if defined(GC_PTHREADS)
/* pthreads thread start routine: each worker thread runs the        */
/* complete test sequence once.                                      */
void * thr_run_one_test(void * arg)
{
    (void)arg;  /* unused */
    run_one_test();
    return NULL;
}
1643
1644 #ifdef GC_DEBUG
1645 # define GC_free GC_debug_free
1646 #endif
1647
/* pthreads entry point: spawn NTHREADS worker threads, run the test */
/* in the main thread too, join all workers, then check heap stats.  */
int main(void)
{
    pthread_t th[NTHREADS];
    pthread_attr_t attr;
    int code;
    int i;
#   ifdef GC_IRIX_THREADS
        /* Force a larger stack to be preallocated      */
        /* Since the initial can't always grow later.   */
        *((volatile char *)&code - 1024*1024) = 0;      /* Require 1 MB */
#   endif /* GC_IRIX_THREADS */
#   if defined(GC_HPUX_THREADS)
      /* Default stack size is too small, especially with the 64 bit ABI */
      /* Increase it.                                                    */
      if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
        GC_printf("pthread_default_stacksize_np failed\n");
      }
#   endif /* GC_HPUX_THREADS */
#   ifdef PTW32_STATIC_LIB
      /* Statically linked pthreads-win32 needs explicit attach calls. */
      pthread_win32_process_attach_np ();
      pthread_win32_thread_attach_np ();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY) \
       && !defined(DARWIN_DONT_PARSE_STACK) && !defined(THREAD_LOCAL_ALLOC)
      /* Test with the Darwin implicit thread registration. */
      GC_use_threads_discovery();
      GC_printf("Using Darwin task-threads-based world stop and push\n");
#   endif
    GC_COND_INIT();

    pthread_attr_init(&attr);
#   if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
       || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS) \
       || defined(GC_OPENBSD_THREADS)
        pthread_attr_setstacksize(&attr, 1000000);
#   endif
    n_tests = 0;
    /* Switch to incremental mode where mprotect-based dirty bits    */
    /* are usable and not explicitly disabled.                       */
#   if (defined(MPROTECT_VDB)) && !defined(REDIRECT_MALLOC) \
       && !defined(MAKE_BACK_GRAPH) && !defined(USE_PROC_FOR_LIBRARIES) \
       && !defined(NO_INCREMENTAL)
      GC_enable_incremental();
      GC_printf("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        GC_printf("Emulating dirty bits with mprotect/signals\n");
#     else
#       ifdef PROC_VDB
          GC_printf("Reading dirty bits from /proc\n");
#       else
          GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
#       endif
#     endif
#   endif
    GC_set_warn_proc(warn_proc);
    if ((code = pthread_key_create(&fl_key, 0)) != 0) {
        GC_printf("Key creation failed %d\n", code);
        FAIL;
    }
    for (i = 0; i < NTHREADS; ++i) {
      if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
        GC_printf("Thread %d creation failed %d\n", i, code);
        FAIL;
      }
    }
    run_one_test();
    /* Join every worker before checking the statistics.             */
    for (i = 0; i < NTHREADS; ++i) {
      if ((code = pthread_join(th[i], 0)) != 0) {
        GC_printf("Thread %d failed %d\n", i, code);
        FAIL;
      }
    }
    check_heap_stats();
    (void)fflush(stdout);
    pthread_attr_destroy(&attr);
    GC_printf("Completed %u collections\n", (unsigned)GC_get_gc_no());
#   ifdef PTW32_STATIC_LIB
      pthread_win32_thread_detach_np ();
      pthread_win32_process_detach_np ();
#   endif
    return(0);
}
1728 #endif /* GC_PTHREADS */
1729